2013-12-19 22:54:32 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
2016-10-26 02:20:24 +00:00
|
|
|
"bytes"
|
2020-09-11 16:43:29 +00:00
|
|
|
"fmt"
|
2014-01-01 00:46:56 +00:00
|
|
|
"net"
|
2013-12-19 22:54:32 +00:00
|
|
|
"os"
|
2021-08-24 21:28:44 +00:00
|
|
|
"strings"
|
2015-08-13 01:48:15 +00:00
|
|
|
"sync"
|
2013-12-19 22:54:32 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
2014-08-27 02:26:55 +00:00
|
|
|
|
2021-08-24 21:28:44 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
|
|
|
"github.com/hashicorp/serf/serf"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"golang.org/x/time/rate"
|
|
|
|
|
2022-03-18 10:46:58 +00:00
|
|
|
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
|
|
|
|
proxycfg: server-local config entry data sources
This is the OSS portion of enterprise PR 2056.
This commit provides server-local implementations of the proxycfg.ConfigEntry
and proxycfg.ConfigEntryList interfaces, that source data from streaming events.
It makes use of the LocalMaterializer type introduced for peering replication,
adding the necessary support for authorization.
It also adds support for "wildcard" subscriptions (within a topic) to the event
publisher, as this is needed to fetch service-resolvers for all services when
configuring mesh gateways.
Currently, events will be emitted for just the ingress-gateway, service-resolver,
and mesh config entry types, as these are the only entries required by proxycfg
— the events will be emitted on topics named IngressGateway, ServiceResolver,
and MeshConfig topics respectively.
Though these events will only be consumed "locally" for now, they can also be
consumed via the gRPC endpoint (confirmed using grpcurl) so using them from
client agents should be a case of swapping the LocalMaterializer for an
RPCMaterializer.
2022-07-01 15:09:47 +00:00
|
|
|
"github.com/hashicorp/consul/agent/consul/stream"
|
2022-07-13 15:33:48 +00:00
|
|
|
grpc "github.com/hashicorp/consul/agent/grpc-internal"
|
|
|
|
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
|
2020-09-14 22:31:07 +00:00
|
|
|
"github.com/hashicorp/consul/agent/pool"
|
2020-09-11 16:43:29 +00:00
|
|
|
"github.com/hashicorp/consul/agent/router"
|
2022-04-06 21:33:05 +00:00
|
|
|
"github.com/hashicorp/consul/agent/rpc/middleware"
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2020-09-14 22:31:07 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/freeport"
|
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2019-04-25 16:26:33 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2020-04-27 16:17:38 +00:00
|
|
|
"github.com/hashicorp/consul/tlsutil"
|
2013-12-19 22:54:32 +00:00
|
|
|
)
|
|
|
|
|
2017-06-26 12:23:09 +00:00
|
|
|
func testClientConfig(t *testing.T) (string, *Config) {
|
2017-05-12 13:41:13 +00:00
|
|
|
dir := testutil.TempDir(t, "consul")
|
2013-12-19 22:54:32 +00:00
|
|
|
config := DefaultConfig()
|
2019-08-27 21:16:41 +00:00
|
|
|
|
2021-11-27 20:27:59 +00:00
|
|
|
ports := freeport.GetN(t, 2)
|
2014-04-07 21:36:32 +00:00
|
|
|
config.Datacenter = "dc1"
|
2013-12-19 22:54:32 +00:00
|
|
|
config.DataDir = dir
|
2017-06-26 12:23:09 +00:00
|
|
|
config.NodeName = uniqueNodeName(t.Name())
|
2014-01-01 00:46:56 +00:00
|
|
|
config.RPCAddr = &net.TCPAddr{
|
|
|
|
IP: []byte{127, 0, 0, 1},
|
2019-08-27 21:16:41 +00:00
|
|
|
Port: ports[0],
|
2014-01-01 00:46:56 +00:00
|
|
|
}
|
2013-12-19 22:54:32 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
|
2019-08-27 21:16:41 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.BindPort = ports[1]
|
2013-12-19 22:54:32 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 200 * time.Millisecond
|
|
|
|
config.SerfLANConfig.MemberlistConfig.ProbeInterval = time.Second
|
|
|
|
config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
|
2022-04-21 20:21:35 +00:00
|
|
|
config.RPCHoldTimeout = 10 * time.Second
|
2014-04-07 21:36:32 +00:00
|
|
|
return dir, config
|
|
|
|
}
|
|
|
|
|
|
|
|
func testClient(t *testing.T) (string, *Client) {
|
2017-06-26 12:23:09 +00:00
|
|
|
return testClientWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.NodeName = uniqueNodeName(t.Name())
|
|
|
|
})
|
2014-04-07 21:36:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func testClientDC(t *testing.T, dc string) (string, *Client) {
|
2017-06-26 12:23:09 +00:00
|
|
|
return testClientWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = dc
|
|
|
|
c.NodeName = uniqueNodeName(t.Name())
|
|
|
|
})
|
2013-12-19 22:54:32 +00:00
|
|
|
}
|
|
|
|
|
2020-05-20 09:31:19 +00:00
|
|
|
func testClientWithConfigWithErr(t *testing.T, cb func(c *Config)) (string, *Client, error) {
|
2017-06-26 12:23:09 +00:00
|
|
|
dir, config := testClientConfig(t)
|
|
|
|
if cb != nil {
|
|
|
|
cb(config)
|
|
|
|
}
|
2020-04-27 16:17:38 +00:00
|
|
|
|
2021-08-25 18:43:11 +00:00
|
|
|
// Apply config to copied fields because many tests only set the old
|
2022-04-21 20:21:35 +00:00
|
|
|
// values.
|
2021-08-25 18:43:11 +00:00
|
|
|
config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
|
|
|
|
config.ACLResolverSettings.NodeName = config.NodeName
|
|
|
|
config.ACLResolverSettings.Datacenter = config.Datacenter
|
|
|
|
config.ACLResolverSettings.EnterpriseMeta = *config.AgentEnterpriseMeta()
|
|
|
|
|
2020-09-14 22:31:07 +00:00
|
|
|
client, err := NewClient(config, newDefaultDeps(t, config))
|
2020-05-20 09:31:19 +00:00
|
|
|
return dir, client, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func testClientWithConfig(t *testing.T, cb func(c *Config)) (string, *Client) {
|
|
|
|
dir, client, err := testClientWithConfigWithErr(t, cb)
|
|
|
|
if err != nil {
|
2014-08-27 02:26:55 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
return dir, client
|
|
|
|
}
|
|
|
|
|
2013-12-19 22:54:32 +00:00
|
|
|
func TestClient_StartStop(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-19 22:54:32 +00:00
|
|
|
dir, client := testClient(t)
|
|
|
|
defer os.RemoveAll(dir)
|
|
|
|
|
|
|
|
if err := client.Shutdown(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2013-12-19 22:56:38 +00:00
|
|
|
|
|
|
|
func TestClient_JoinLAN(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-19 22:56:38 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClient(t)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
2019-02-22 20:40:59 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2013-12-19 22:56:38 +00:00
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, c1, s1)
|
2019-02-22 20:40:59 +00:00
|
|
|
testrpc.WaitForTestAgent(t, c1.RPC, "dc1")
|
2017-05-05 11:58:13 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-08-27 15:23:52 +00:00
|
|
|
if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
|
2017-05-09 05:31:41 +00:00
|
|
|
r.Fatalf("got %d servers want %d", got, want)
|
2017-05-05 11:58:13 +00:00
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d server LAN members want %d", got, want)
|
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d client LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2013-12-19 22:56:38 +00:00
|
|
|
}
|
2013-12-19 23:08:55 +00:00
|
|
|
|
2019-03-04 14:19:35 +00:00
|
|
|
func TestClient_LANReap(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-03-04 14:19:35 +00:00
|
|
|
t.Parallel()
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
|
|
|
|
dir2, c1 := testClientWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.SerfFloodInterval = 100 * time.Millisecond
|
|
|
|
c.SerfLANConfig.ReconnectTimeout = 250 * time.Millisecond
|
2019-03-05 16:16:31 +00:00
|
|
|
c.SerfLANConfig.TombstoneTimeout = 250 * time.Millisecond
|
2019-03-04 14:19:35 +00:00
|
|
|
c.SerfLANConfig.ReapInterval = 500 * time.Millisecond
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
|
|
|
joinLAN(t, c1, s1)
|
|
|
|
testrpc.WaitForLeader(t, c1.RPC, "dc1")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-10-26 20:08:55 +00:00
|
|
|
require.Len(r, s1.LANMembersInAgentPartition(), 2)
|
|
|
|
require.Len(r, c1.LANMembersInAgentPartition(), 2)
|
2019-03-04 14:19:35 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Check the router has both
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-08-27 15:23:52 +00:00
|
|
|
server := c1.router.FindLANServer()
|
2020-11-05 16:18:59 +00:00
|
|
|
require.NotNil(r, server)
|
|
|
|
require.Equal(r, s1.config.NodeName, server.Name)
|
2019-03-04 14:19:35 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// shutdown the second dc
|
|
|
|
s1.Shutdown()
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-10-26 20:08:55 +00:00
|
|
|
require.Len(r, c1.LANMembersInAgentPartition(), 1)
|
2020-08-27 15:23:52 +00:00
|
|
|
server := c1.router.FindLANServer()
|
2019-03-04 14:19:35 +00:00
|
|
|
require.Nil(t, server)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-01-06 23:48:46 +00:00
|
|
|
func TestClient_JoinLAN_Invalid(t *testing.T) {
|
2021-07-22 18:58:08 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2015-01-06 23:48:46 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClientDC(t, "other")
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2021-10-26 20:08:55 +00:00
|
|
|
if _, err := c1.JoinLAN([]string{joinAddrLAN(s1)}, nil); err == nil {
|
2017-05-05 10:29:49 +00:00
|
|
|
t.Fatal("should error")
|
2015-01-06 23:48:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(50 * time.Millisecond)
|
2021-10-26 20:08:55 +00:00
|
|
|
if len(s1.LANMembersInAgentPartition()) != 1 {
|
2015-01-06 23:48:46 +00:00
|
|
|
t.Fatalf("should not join")
|
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if len(c1.LANMembersInAgentPartition()) != 1 {
|
2015-01-06 23:48:46 +00:00
|
|
|
t.Fatalf("should not join")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestClient_JoinWAN_Invalid(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2015-01-06 23:48:46 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClientDC(t, "dc2")
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2021-10-26 20:08:55 +00:00
|
|
|
if _, err := c1.JoinLAN([]string{joinAddrWAN(s1)}, nil); err == nil {
|
2017-05-05 10:29:49 +00:00
|
|
|
t.Fatal("should error")
|
2015-01-06 23:48:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(50 * time.Millisecond)
|
|
|
|
if len(s1.WANMembers()) != 1 {
|
|
|
|
t.Fatalf("should not join")
|
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if len(c1.LANMembersInAgentPartition()) != 1 {
|
2015-01-06 23:48:46 +00:00
|
|
|
t.Fatalf("should not join")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-19 23:08:55 +00:00
|
|
|
func TestClient_RPC(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-19 23:08:55 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClient(t)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// Try an RPC
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, c1, s1)
|
2013-12-19 23:08:55 +00:00
|
|
|
|
|
|
|
// Check the members
|
2021-10-26 20:08:55 +00:00
|
|
|
if len(s1.LANMembersInAgentPartition()) != 2 {
|
2013-12-19 23:08:55 +00:00
|
|
|
t.Fatalf("bad len")
|
|
|
|
}
|
|
|
|
|
2021-10-26 20:08:55 +00:00
|
|
|
if len(c1.LANMembersInAgentPartition()) != 2 {
|
2013-12-19 23:08:55 +00:00
|
|
|
t.Fatalf("bad len")
|
|
|
|
}
|
|
|
|
|
2014-05-09 00:29:51 +00:00
|
|
|
// RPC should succeed
|
2017-05-05 11:58:13 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
|
|
|
|
r.Fatal("ping failed", err)
|
|
|
|
}
|
|
|
|
})
|
2013-12-19 23:08:55 +00:00
|
|
|
}
|
2014-04-07 21:36:32 +00:00
|
|
|
|
2017-10-10 22:19:50 +00:00
|
|
|
// leaderFailer is a test RPC endpoint whose handlers simulate loss of
// leadership so that client-side RPC retry behavior can be exercised.
type leaderFailer struct {
	totalCalls int // incremented on every call to Always or Once
	onceCalls  int // incremented only on calls to Once
}
|
|
|
|
|
|
|
|
// Always is an RPC handler that unconditionally reports ErrNoLeader,
// counting each invocation in totalCalls.
func (l *leaderFailer) Always(args struct{}, reply *struct{}) error {
	l.totalCalls++
	return structs.ErrNoLeader
}
|
|
|
|
|
|
|
|
func (l *leaderFailer) Once(args struct{}, reply *struct{}) error {
|
|
|
|
l.totalCalls++
|
|
|
|
l.onceCalls++
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case l.onceCalls == 1:
|
|
|
|
return structs.ErrNoLeader
|
|
|
|
|
|
|
|
default:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestClient_RPC_Retry(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-10-10 22:19:50 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClientWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.NodeName = uniqueNodeName(t.Name())
|
|
|
|
c.RPCHoldTimeout = 2 * time.Second
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
joinLAN(t, c1, s1)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
failer := &leaderFailer{}
|
|
|
|
if err := s1.RegisterEndpoint("Fail", failer); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Fail.Always", struct{}{}, &out); !structs.IsErrNoLeader(err) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if got, want := failer.totalCalls, 2; got < want {
|
|
|
|
t.Fatalf("got %d want >= %d", got, want)
|
|
|
|
}
|
|
|
|
if err := c1.RPC("Fail.Once", struct{}{}, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if got, want := failer.onceCalls, 2; got < want {
|
|
|
|
t.Fatalf("got %d want >= %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := failer.totalCalls, 4; got < want {
|
|
|
|
t.Fatalf("got %d want >= %d", got, want)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-13 01:48:15 +00:00
|
|
|
// TestClient_RPC_Pool hammers the client with many concurrent RPCs to
// exercise contention in the connection pool when new connections are
// being opened.
func TestClient_RPC_Pool(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join.
	joinLAN(t, c1, s1)

	// Wait for both agents to finish joining
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server LAN members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client LAN members want %d", got, want)
		}
	})

	// Blast out a bunch of RPC requests at the same time to try to get
	// contention opening new connections.
	var wg sync.WaitGroup
	for i := 0; i < 150; i++ {
		wg.Add(1)

		// NOTE(review): retry.Run(t, ...) is invoked from spawned
		// goroutines; a failure ends up calling t's fail machinery from a
		// non-test goroutine, which the testing package documents as
		// unsafe for FailNow. Presumably acceptable here because failures
		// are rare — confirm against the retry package's semantics.
		go func() {
			defer wg.Done()
			var out struct{}
			retry.Run(t, func(r *retry.R) {
				if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
					r.Fatal("ping failed", err)
				}
			})
		}()
	}

	// Wait for every in-flight ping to complete before the deferred
	// shutdowns run.
	wg.Wait()
}
|
|
|
|
|
2016-03-27 06:52:06 +00:00
|
|
|
// TestClient_RPC_ConsulServerPing verifies that a client can ping every
// server in its rotation: after joining five servers it repeatedly asks the
// router for a route, pings that server, then artificially fails it so the
// next iteration rotates to a different server.
func TestClient_RPC_ConsulServerPing(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	var servers []*Server
	const numServers = 5

	for n := 0; n < numServers; n++ {
		// Only the first server bootstraps the cluster.
		bootstrap := n == 0
		dir, s := testServerDCBootstrap(t, "dc1", bootstrap)
		// Deliberate defer-in-loop: cleanup for all servers runs at test end.
		defer os.RemoveAll(dir)
		defer s.Shutdown()

		servers = append(servers, s)
	}

	const numClients = 1
	clientDir, c := testClient(t)
	defer os.RemoveAll(clientDir)
	defer c.Shutdown()

	// Join all servers.
	for _, s := range servers {
		joinLAN(t, c, s)
	}

	// Wait until every server sees the full peer set.
	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, numServers)) })
	}

	// Sleep to allow Serf to sync, shuffle, and let the shuffle complete
	c.router.GetLANManager().ResetRebalanceTimer()
	time.Sleep(time.Second)

	// Every agent should now see all servers plus the one client.
	if len(c.LANMembersInAgentPartition()) != numServers+numClients {
		t.Errorf("bad len: %d", len(c.LANMembersInAgentPartition()))
	}
	for _, s := range servers {
		if len(s.LANMembersInAgentPartition()) != numServers+numClients {
			t.Errorf("bad len: %d", len(s.LANMembersInAgentPartition()))
		}
	}

	// Ping each server in the list
	var pingCount int
	for range servers {
		time.Sleep(200 * time.Millisecond)
		m, s := c.router.FindLANRoute()
		ok, err := c.connPool.Ping(s.Datacenter, s.ShortName, s.Addr)
		if !ok {
			t.Errorf("Unable to ping server %v: %s", s.String(), err)
		}
		pingCount++

		// Artificially fail the server in order to rotate the server
		// list
		m.NotifyFailedServer(s)
	}

	// One successful ping per server proves the rotation visited them all.
	if pingCount != numServers {
		t.Errorf("bad len: %d/%d", pingCount, numServers)
	}
}
|
|
|
|
|
2014-04-07 21:36:32 +00:00
|
|
|
func TestClient_RPC_TLS(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2020-08-07 21:28:16 +00:00
|
|
|
_, conf1 := testServerConfig(t)
|
2022-03-18 10:46:58 +00:00
|
|
|
conf1.TLSConfig.InternalRPC.VerifyIncoming = true
|
|
|
|
conf1.TLSConfig.InternalRPC.VerifyOutgoing = true
|
2014-04-07 21:36:32 +00:00
|
|
|
configureTLS(conf1)
|
2020-07-29 20:05:51 +00:00
|
|
|
s1, err := newServer(t, conf1)
|
2014-04-07 21:36:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2020-08-07 21:28:16 +00:00
|
|
|
_, conf2 := testClientConfig(t)
|
2022-03-18 10:46:58 +00:00
|
|
|
conf2.TLSConfig.InternalRPC.VerifyOutgoing = true
|
2014-04-07 21:36:32 +00:00
|
|
|
configureTLS(conf2)
|
2020-08-07 21:28:16 +00:00
|
|
|
c1 := newClient(t, conf2)
|
2014-04-07 21:36:32 +00:00
|
|
|
|
|
|
|
// Try an RPC
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, c1, s1)
|
2014-04-07 21:36:32 +00:00
|
|
|
|
2016-10-26 00:28:45 +00:00
|
|
|
// Wait for joins to finish/RPC to succeed
|
2017-05-05 11:58:13 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d server LAN members want %d", got, want)
|
2016-10-26 00:28:45 +00:00
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d client LAN members want %d", got, want)
|
2016-10-26 00:28:45 +00:00
|
|
|
}
|
2017-05-05 11:58:13 +00:00
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
|
|
|
|
r.Fatal("ping failed", err)
|
|
|
|
}
|
|
|
|
})
|
2014-04-07 21:36:32 +00:00
|
|
|
}
|
2014-08-27 02:26:55 +00:00
|
|
|
|
2020-08-07 21:28:16 +00:00
|
|
|
func newClient(t *testing.T, config *Config) *Client {
|
|
|
|
t.Helper()
|
|
|
|
|
2020-09-14 22:31:07 +00:00
|
|
|
client, err := NewClient(config, newDefaultDeps(t, config))
|
2020-08-07 21:28:16 +00:00
|
|
|
require.NoError(t, err, "failed to create client")
|
|
|
|
t.Cleanup(func() {
|
|
|
|
client.Shutdown()
|
|
|
|
})
|
|
|
|
return client
|
2020-07-29 19:26:15 +00:00
|
|
|
}
|
|
|
|
|
2021-08-24 21:28:44 +00:00
|
|
|
func newTestResolverConfig(t *testing.T, suffix string) resolver.Config {
|
|
|
|
n := t.Name()
|
|
|
|
s := strings.Replace(n, "/", "", -1)
|
|
|
|
s = strings.Replace(s, "_", "", -1)
|
|
|
|
return resolver.Config{Authority: strings.ToLower(s) + "-" + suffix}
|
|
|
|
}
|
|
|
|
|
2020-09-14 22:31:07 +00:00
|
|
|
// newDefaultDeps builds the full Deps wiring (logger, TLS, router, gRPC
// resolver, connection pools, event publisher) needed to construct a test
// Client or Server from config c. It registers a per-test gRPC resolver
// builder globally via resolver.Register.
func newDefaultDeps(t *testing.T, c *Config) Deps {
	t.Helper()

	// Per-test logger named after the node, buffered so output is only
	// flushed on test failure.
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:   c.NodeName,
		Level:  testutil.TestLogLevel,
		Output: testutil.NewLogBuffer(t),
	})

	tls, err := tlsutil.NewConfigurator(c.TLSConfig, logger)
	require.NoError(t, err, "failed to create tls configuration")

	// The resolver authority includes node name and datacenter so parallel
	// tests don't collide in the global resolver registry.
	builder := resolver.NewServerResolverBuilder(newTestResolverConfig(t, c.NodeName+"-"+c.Datacenter))
	r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), builder)
	resolver.Register(builder)

	// Net-RPC connection pool configured for a client-side (non-server) agent.
	connPool := &pool.ConnPool{
		Server:           false,
		SrcAddr:          c.RPCSrcAddr,
		Logger:           logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
		MaxTime:          2 * time.Minute,
		MaxStreams:       4,
		TLSConfigurator:  tls,
		Datacenter:       c.Datacenter,
		Timeout:          c.RPCHoldTimeout,
		DefaultQueryTime: c.DefaultQueryTime,
		MaxQueryTime:     c.MaxQueryTime,
	}

	return Deps{
		EventPublisher:  stream.NewEventPublisher(10 * time.Second),
		Logger:          logger,
		TLSConfigurator: tls,
		Tokens:          new(token.Store),
		Router:          r,
		ConnPool:        connPool,
		// gRPC pool resolves servers through the same builder as the router.
		GRPCConnPool: grpc.NewClientConnPool(grpc.ClientConnPoolConfig{
			Servers:               builder,
			TLSWrapper:            grpc.TLSWrapper(tls.OutgoingRPCWrapper()),
			UseTLSForDC:           tls.UseTLS,
			DialingFromServer:     true,
			DialingFromDatacenter: c.Datacenter,
		}),
		LeaderForwarder:          builder,
		NewRequestRecorderFunc:   middleware.NewRequestRecorder,
		GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor,
		EnterpriseDeps:           newDefaultDepsEnterprise(t, logger, c),
	}
}
|
|
|
|
|
2017-09-01 22:02:50 +00:00
|
|
|
func TestClient_RPC_RateLimit(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-09-01 22:02:50 +00:00
|
|
|
t.Parallel()
|
2020-08-07 21:28:16 +00:00
|
|
|
_, conf1 := testServerConfig(t)
|
2020-07-29 20:05:51 +00:00
|
|
|
s1, err := newServer(t, conf1)
|
2017-09-01 22:02:50 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer s1.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
2020-08-07 21:28:16 +00:00
|
|
|
_, conf2 := testClientConfig(t)
|
2020-09-16 17:29:59 +00:00
|
|
|
conf2.RPCRateLimit = 2
|
2017-09-01 22:02:50 +00:00
|
|
|
conf2.RPCMaxBurst = 2
|
2020-08-07 21:28:16 +00:00
|
|
|
c1 := newClient(t, conf2)
|
2017-09-01 22:02:50 +00:00
|
|
|
|
|
|
|
joinLAN(t, c1, s1)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrRPCRateExceeded {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-10-26 02:20:24 +00:00
|
|
|
func TestClient_SnapshotRPC(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2016-10-26 02:20:24 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, c1 := testClient(t)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// Wait for the leader
|
2017-04-19 23:00:11 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2016-10-26 02:20:24 +00:00
|
|
|
|
|
|
|
// Try to join.
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, c1, s1)
|
2018-10-19 08:53:19 +00:00
|
|
|
testrpc.WaitForLeader(t, c1.RPC, "dc1")
|
2016-10-26 02:20:24 +00:00
|
|
|
|
|
|
|
// Wait until we've got a healthy server.
|
2017-05-05 11:58:13 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-08-27 15:23:52 +00:00
|
|
|
if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
|
2017-05-09 05:31:41 +00:00
|
|
|
r.Fatalf("got %d servers want %d", got, want)
|
2017-05-05 11:58:13 +00:00
|
|
|
}
|
|
|
|
})
|
2016-10-26 02:20:24 +00:00
|
|
|
|
|
|
|
// Take a snapshot.
|
|
|
|
var snap bytes.Buffer
|
|
|
|
args := structs.SnapshotRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.SnapshotSave,
|
|
|
|
}
|
|
|
|
if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restore a snapshot.
|
|
|
|
args.Op = structs.SnapshotRestore
|
|
|
|
if err := c1.SnapshotRPC(&args, &snap, nil, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-01 22:02:50 +00:00
|
|
|
func TestClient_SnapshotRPC_RateLimit(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-09-01 22:02:50 +00:00
|
|
|
t.Parallel()
|
2020-08-07 21:28:16 +00:00
|
|
|
_, s1 := testServer(t)
|
2017-09-01 22:02:50 +00:00
|
|
|
defer s1.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
2020-08-07 21:28:16 +00:00
|
|
|
_, conf1 := testClientConfig(t)
|
2020-09-16 17:29:59 +00:00
|
|
|
conf1.RPCRateLimit = 2
|
2017-09-01 22:02:50 +00:00
|
|
|
conf1.RPCMaxBurst = 2
|
2020-08-07 21:28:16 +00:00
|
|
|
c1 := newClient(t, conf1)
|
2017-09-01 22:02:50 +00:00
|
|
|
|
|
|
|
joinLAN(t, c1, s1)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2020-08-27 15:23:52 +00:00
|
|
|
if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
|
2017-09-01 22:02:50 +00:00
|
|
|
r.Fatalf("got %d servers want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var snap bytes.Buffer
|
|
|
|
args := structs.SnapshotRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Op: structs.SnapshotSave,
|
|
|
|
}
|
|
|
|
if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != structs.ErrRPCRateExceeded {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-10-26 02:20:24 +00:00
|
|
|
// TestClient_SnapshotRPC_TLS exercises the snapshot save/restore RPC path
// over a TLS-verified connection between a client and a server.
func TestClient_SnapshotRPC_TLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	// Server requires verified TLS in both directions.
	_, conf1 := testServerConfig(t)
	conf1.TLSConfig.InternalRPC.VerifyIncoming = true
	conf1.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()

	// Client verifies outgoing connections only.
	_, conf2 := testClientConfig(t)
	conf2.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf2)
	c1 := newClient(t, conf2)

	// Wait for the leader
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Try to join.
	joinLAN(t, c1, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client members want %d", got, want)
		}

		// Wait until we've got a healthy server.
		if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
			r.Fatalf("got %d servers want %d", got, want)
		}
	})

	// Take a snapshot.
	var snap bytes.Buffer
	args := structs.SnapshotRequest{
		Datacenter: "dc1",
		Op:         structs.SnapshotSave,
	}
	if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Restore a snapshot.
	args.Op = structs.SnapshotRestore
	if err := c1.SnapshotRPC(&args, &snap, nil, nil); err != nil {
		t.Fatalf("err: %v", err)
	}
}
|
|
|
|
|
2014-08-27 02:26:55 +00:00
|
|
|
func TestClientServer_UserEvent(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2014-08-27 02:26:55 +00:00
|
|
|
clientOut := make(chan serf.UserEvent, 2)
|
|
|
|
dir1, c1 := testClientWithConfig(t, func(conf *Config) {
|
|
|
|
conf.UserEventHandler = func(e serf.UserEvent) {
|
|
|
|
clientOut <- e
|
|
|
|
}
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
serverOut := make(chan serf.UserEvent, 2)
|
|
|
|
dir2, s1 := testServerWithConfig(t, func(conf *Config) {
|
|
|
|
conf.UserEventHandler = func(e serf.UserEvent) {
|
|
|
|
serverOut <- e
|
|
|
|
}
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, c1, s1)
|
2014-08-27 02:26:55 +00:00
|
|
|
|
2015-07-14 18:38:12 +00:00
|
|
|
// Wait for the leader
|
2017-04-19 23:00:11 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2015-07-14 18:38:12 +00:00
|
|
|
|
2014-08-27 02:26:55 +00:00
|
|
|
// Check the members
|
2017-05-05 11:58:13 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d server LAN members want %d", got, want)
|
|
|
|
}
|
2021-10-26 20:08:55 +00:00
|
|
|
if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
|
2017-05-05 11:58:13 +00:00
|
|
|
r.Fatalf("got %d client LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2014-08-27 02:26:55 +00:00
|
|
|
|
|
|
|
// Fire the user event
|
2015-10-13 23:43:52 +00:00
|
|
|
codec := rpcClient(t, s1)
|
2015-07-14 18:38:12 +00:00
|
|
|
event := structs.EventFireRequest{
|
|
|
|
Name: "foo",
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Payload: []byte("baz"),
|
|
|
|
}
|
2015-10-13 23:43:52 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil); err != nil {
|
2014-08-27 02:26:55 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for all the events
|
2015-06-18 23:20:24 +00:00
|
|
|
var clientReceived, serverReceived bool
|
|
|
|
for i := 0; i < 2; i++ {
|
2014-08-27 02:26:55 +00:00
|
|
|
select {
|
|
|
|
case e := <-clientOut:
|
|
|
|
switch e.Name {
|
|
|
|
case "foo":
|
2015-06-18 23:20:24 +00:00
|
|
|
clientReceived = true
|
2014-08-27 02:26:55 +00:00
|
|
|
default:
|
|
|
|
t.Fatalf("Bad: %#v", e)
|
|
|
|
}
|
|
|
|
|
|
|
|
case e := <-serverOut:
|
|
|
|
switch e.Name {
|
|
|
|
case "foo":
|
2015-06-18 23:20:24 +00:00
|
|
|
serverReceived = true
|
2014-08-27 02:26:55 +00:00
|
|
|
default:
|
|
|
|
t.Fatalf("Bad: %#v", e)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(10 * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-18 23:20:24 +00:00
|
|
|
if !serverReceived || !clientReceived {
|
2014-08-27 02:26:55 +00:00
|
|
|
t.Fatalf("missing events")
|
|
|
|
}
|
|
|
|
}
|
2014-10-07 18:05:31 +00:00
|
|
|
|
2020-09-16 17:28:03 +00:00
|
|
|
func TestClient_ReloadConfig(t *testing.T) {
|
|
|
|
_, cfg := testClientConfig(t)
|
2020-09-16 17:29:59 +00:00
|
|
|
cfg.RPCRateLimit = rate.Limit(500)
|
2020-09-16 17:28:03 +00:00
|
|
|
cfg.RPCMaxBurst = 5000
|
|
|
|
deps := newDefaultDeps(t, &Config{NodeName: "node1", Datacenter: "dc1"})
|
|
|
|
c, err := NewClient(cfg, deps)
|
|
|
|
require.NoError(t, err)
|
2018-06-11 20:23:51 +00:00
|
|
|
|
|
|
|
limiter := c.rpcLimiter.Load().(*rate.Limiter)
|
|
|
|
require.Equal(t, rate.Limit(500), limiter.Limit())
|
|
|
|
require.Equal(t, 5000, limiter.Burst())
|
|
|
|
|
2020-09-16 17:28:03 +00:00
|
|
|
rc := ReloadableConfig{
|
|
|
|
RPCRateLimit: 1000,
|
|
|
|
RPCMaxBurst: 10000,
|
|
|
|
RPCMaxConnsPerClient: 0,
|
|
|
|
}
|
|
|
|
require.NoError(t, c.ReloadConfig(rc))
|
2018-06-11 20:23:51 +00:00
|
|
|
|
|
|
|
limiter = c.rpcLimiter.Load().(*rate.Limiter)
|
|
|
|
require.Equal(t, rate.Limit(1000), limiter.Limit())
|
|
|
|
require.Equal(t, 10000, limiter.Burst())
|
|
|
|
}
|
2020-10-08 19:02:19 +00:00
|
|
|
|
|
|
|
func TestClient_ShortReconnectTimeout(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2020-10-08 19:02:19 +00:00
|
|
|
cluster := newTestCluster(t, &testClusterConfig{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Servers: 1,
|
|
|
|
Clients: 2,
|
|
|
|
ServerConf: func(c *Config) {
|
|
|
|
c.SerfLANConfig.ReapInterval = 50 * time.Millisecond
|
|
|
|
},
|
|
|
|
ClientConf: func(c *Config) {
|
|
|
|
c.SerfLANConfig.ReapInterval = 50 * time.Millisecond
|
|
|
|
c.AdvertiseReconnectTimeout = 100 * time.Millisecond
|
|
|
|
},
|
|
|
|
})
|
|
|
|
|
|
|
|
// shutdown the client
|
|
|
|
cluster.Clients[1].Shutdown()
|
|
|
|
|
|
|
|
// Now wait for it to be reaped. We set the advertised reconnect
|
|
|
|
// timeout to 100ms so we are going to check every 50 ms and allow
|
|
|
|
// up to 10x the time in the case of slow CI.
|
|
|
|
require.Eventually(t,
|
|
|
|
func() bool {
|
2021-10-26 20:08:55 +00:00
|
|
|
return len(cluster.Servers[0].LANMembersInAgentPartition()) == 2 &&
|
|
|
|
len(cluster.Clients[0].LANMembersInAgentPartition()) == 2
|
2020-10-08 19:02:19 +00:00
|
|
|
|
|
|
|
},
|
|
|
|
time.Second,
|
|
|
|
50*time.Millisecond,
|
|
|
|
"The client node was not reaped within the alotted time")
|
|
|
|
}
|
2022-04-21 20:21:35 +00:00
|
|
|
|
|
|
|
// waiter is a stub RPC endpoint whose only method blocks for a fixed
// duration; it is registered in tests to exercise client-side RPC timeouts.
type waiter struct {
	// duration is how long Wait sleeps before returning.
	duration time.Duration
}
|
|
|
|
|
|
|
|
// Wait sleeps for the configured duration and then reports success. The
// (args, reply) parameters are unused; they exist only so the method matches
// the net/rpc handler shape and can be registered as an RPC endpoint.
func (w *waiter) Wait(struct{}, *struct{}) error {
	time.Sleep(w.duration)
	return nil
}
|
|
|
|
|
|
|
|
func TestClient_RPC_Timeout(t *testing.T) {
|
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
_, s1 := testServerWithConfig(t)
|
|
|
|
|
|
|
|
_, c1 := testClientWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.NodeName = uniqueNodeName(t.Name())
|
|
|
|
c.RPCHoldTimeout = 10 * time.Millisecond
|
|
|
|
c.DefaultQueryTime = 100 * time.Millisecond
|
|
|
|
c.MaxQueryTime = 200 * time.Millisecond
|
|
|
|
})
|
|
|
|
joinLAN(t, c1, s1)
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out struct{}
|
|
|
|
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// waiter will sleep for 50ms
|
|
|
|
require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond}))
|
|
|
|
|
|
|
|
// Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms)
|
|
|
|
// so we expect the RPC call to timeout.
|
|
|
|
var out struct{}
|
|
|
|
err := c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{}, &out)
|
|
|
|
require.Error(t, err)
|
|
|
|
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
|
|
|
|
|
|
|
|
// Blocking requests have a longer timeout (100ms) so this should pass
|
|
|
|
out = struct{}{}
|
|
|
|
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
MinQueryIndex: 1,
|
|
|
|
},
|
|
|
|
}, &out)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// We pass in a custom MaxQueryTime (20ms) through QueryOptions which should fail
|
|
|
|
out = struct{}{}
|
|
|
|
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
MinQueryIndex: 1,
|
|
|
|
MaxQueryTime: 20 * time.Millisecond,
|
|
|
|
},
|
|
|
|
}, &out)
|
|
|
|
require.Error(t, err)
|
|
|
|
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
|
|
|
|
}
|