// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/serf/serf"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"

	msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"

	"github.com/hashicorp/consul/agent/consul/stream"
	"github.com/hashicorp/consul/agent/grpc-external/limiter"
	grpc "github.com/hashicorp/consul/agent/grpc-internal"
	"github.com/hashicorp/consul/agent/grpc-internal/balancer"
	"github.com/hashicorp/consul/agent/grpc-internal/resolver"
	"github.com/hashicorp/consul/agent/pool"
	"github.com/hashicorp/consul/agent/router"
	"github.com/hashicorp/consul/agent/rpc/middleware"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/token"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/tlsutil"
)

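// testClientConfig returns a fresh test data directory and a client Config
// bound to loopback with randomly assigned RPC and Serf LAN ports.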
func testClientConfig(t *testing.T) (string, *Config) {
	dir := testutil.TempDir(t, "consul")
	config := DefaultConfig()

	ports := freeport.GetN(t, 2)
	config.Datacenter = "dc1"
	config.DataDir = dir
	config.NodeName = uniqueNodeName(t.Name())
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: ports[0],
	}
	config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfLANConfig.MemberlistConfig.BindPort = ports[1]
	config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 200 * time.Millisecond
	config.SerfLANConfig.MemberlistConfig.ProbeInterval = time.Second
	config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	return dir, config
}

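// testClient returns a test Client in datacenter "dc1" with a unique node name.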
func testClient(t *testing.T) (string, *Client) {
	return testClientWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.NodeName = uniqueNodeName(t.Name())
	})
}

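// testClientDC returns a test Client in the given datacenter.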
func testClientDC(t *testing.T, dc string) (string, *Client) {
	return testClientWithConfig(t, func(c *Config) {
		c.Datacenter = dc
		c.NodeName = uniqueNodeName(t.Name())
	})
}

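// testClientWithConfigWithErr builds a Client from the default test config,
// applies the optional callback, and returns any construction error to the
// caller instead of failing the test.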
func testClientWithConfigWithErr(t *testing.T, cb func(c *Config)) (string, *Client, error) {
	dir, config := testClientConfig(t)
	if cb != nil {
		cb(config)
	}

	// Apply config to copied fields because many tests only set the old
	// values.
	config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
	config.ACLResolverSettings.NodeName = config.NodeName
	config.ACLResolverSettings.Datacenter = config.Datacenter
	config.ACLResolverSettings.EnterpriseMeta = *config.AgentEnterpriseMeta()

	client, err := NewClient(config, newDefaultDeps(t, config))
	return dir, client, err
}

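// testClientWithConfig is like testClientWithConfigWithErr but fails the test
// if the client cannot be created.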
func testClientWithConfig(t *testing.T, cb func(c *Config)) (string, *Client) {
	dir, client, err := testClientWithConfigWithErr(t, cb)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir, client
}

func TestClient_StartStop(t *testing.T) {
	t.Parallel()
	dir, client := testClient(t)
	defer os.RemoveAll(dir)

	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestClient_JoinLAN(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	// Try to join
	joinLAN(t, c1, s1)
	testrpc.WaitForTestAgent(t, c1.RPC, "dc1")
	retry.Run(t, func(r *retry.R) {
		if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
			r.Fatalf("got %d servers want %d", got, want)
		}
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server LAN members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client LAN members want %d", got, want)
		}
	})
}

func TestClient_LANReap(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)

	dir2, c1 := testClientWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.SerfFloodInterval = 100 * time.Millisecond
		c.SerfLANConfig.ReconnectTimeout = 250 * time.Millisecond
		c.SerfLANConfig.TombstoneTimeout = 250 * time.Millisecond
		c.SerfLANConfig.ReapInterval = 500 * time.Millisecond
	})
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	joinLAN(t, c1, s1)
	testrpc.WaitForLeader(t, c1.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.LANMembersInAgentPartition(), 2)
		require.Len(r, c1.LANMembersInAgentPartition(), 2)
	})

	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		server := c1.router.FindLANServer()
		require.NotNil(r, server)
		require.Equal(r, s1.config.NodeName, server.Name)
	})

	// Shut down the server so the client reaps it from its LAN member list.
	s1.Shutdown()

	retry.Run(t, func(r *retry.R) {
		require.Len(r, c1.LANMembersInAgentPartition(), 1)
		server := c1.router.FindLANServer()
		require.Nil(r, server)
	})
}

func TestClient_JoinLAN_Invalid(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClientDC(t, "other")
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	if _, err := c1.JoinLAN([]string{joinAddrLAN(s1)}, nil); err == nil {
		t.Fatal("should error")
	}

	time.Sleep(50 * time.Millisecond)
	if len(s1.LANMembersInAgentPartition()) != 1 {
		t.Fatalf("should not join")
	}
	if len(c1.LANMembersInAgentPartition()) != 1 {
		t.Fatalf("should not join")
	}
}

func TestClient_JoinWAN_Invalid(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClientDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	if _, err := c1.JoinLAN([]string{joinAddrWAN(s1)}, nil); err == nil {
		t.Fatal("should error")
	}

	time.Sleep(50 * time.Millisecond)
	if len(s1.WANMembers()) != 1 {
		t.Fatalf("should not join")
	}
	if len(c1.LANMembersInAgentPartition()) != 1 {
		t.Fatalf("should not join")
	}
}

func TestClient_RPC(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try an RPC
	var out struct{}
	if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
		t.Fatalf("err: %v", err)
	}

	// Try to join
	joinLAN(t, c1, s1)

	// Check the members
	if len(s1.LANMembersInAgentPartition()) != 2 {
		t.Fatalf("bad len")
	}

	if len(c1.LANMembersInAgentPartition()) != 2 {
		t.Fatalf("bad len")
	}

	// RPC should succeed
	retry.Run(t, func(r *retry.R) {
		if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			r.Fatal("ping failed", err)
		}
	})
}

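// leaderFailer is a test RPC endpoint used to exercise client retry behavior:
// Always returns ErrNoLeader on every call, while Once fails only the first
// call and succeeds afterwards.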
type leaderFailer struct {
	totalCalls int
	onceCalls  int
}

func (l *leaderFailer) Always(args struct{}, reply *struct{}) error {
	l.totalCalls++
	return structs.ErrNoLeader
}

func (l *leaderFailer) Once(args struct{}, reply *struct{}) error {
	l.totalCalls++
	l.onceCalls++

	switch {
	case l.onceCalls == 1:
		return structs.ErrNoLeader

	default:
		return nil
	}
}

func TestClient_RPC_Retry(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClientWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.NodeName = uniqueNodeName(t.Name())
		c.RPCHoldTimeout = 2 * time.Second
	})
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	joinLAN(t, c1, s1)
	retry.Run(t, func(r *retry.R) {
		var out struct{}
		if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			r.Fatalf("err: %v", err)
		}
	})

	failer := &leaderFailer{}
	if err := s1.RegisterEndpoint("Fail", failer); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out struct{}
	if err := c1.RPC(context.Background(), "Fail.Always", struct{}{}, &out); !structs.IsErrNoLeader(err) {
		t.Fatalf("err: %v", err)
	}
	if got, want := failer.totalCalls, 2; got < want {
		t.Fatalf("got %d want >= %d", got, want)
	}
	if err := c1.RPC(context.Background(), "Fail.Once", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	if got, want := failer.onceCalls, 2; got < want {
		t.Fatalf("got %d want >= %d", got, want)
	}
	if got, want := failer.totalCalls, 4; got < want {
		t.Fatalf("got %d want >= %d", got, want)
	}
}

func TestClient_RPC_Pool(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join.
	joinLAN(t, c1, s1)

	// Wait for both agents to finish joining
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server LAN members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client LAN members want %d", got, want)
		}
	})

	// Blast out a bunch of RPC requests at the same time to try to get
	// contention opening new connections.
	var wg sync.WaitGroup
	for i := 0; i < 150; i++ {
		wg.Add(1)

		go func() {
			defer wg.Done()
			var out struct{}
			retry.Run(t, func(r *retry.R) {
				if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
					r.Fatal("ping failed", err)
				}
			})
		}()
	}

	wg.Wait()
}

func TestClient_RPC_ConsulServerPing(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	var servers []*Server
	const numServers = 5

	for n := 0; n < numServers; n++ {
		bootstrap := n == 0
		dir, s := testServerDCBootstrap(t, "dc1", bootstrap)
		defer os.RemoveAll(dir)
		defer s.Shutdown()

		servers = append(servers, s)
	}

	const numClients = 1
	clientDir, c := testClient(t)
	defer os.RemoveAll(clientDir)
	defer c.Shutdown()

	// Join all servers.
	for _, s := range servers {
		joinLAN(t, c, s)
	}

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, numServers)) })
	}

	// Sleep to allow Serf to sync, shuffle, and let the shuffle complete
	c.router.GetLANManager().ResetRebalanceTimer()
	time.Sleep(time.Second)

	if len(c.LANMembersInAgentPartition()) != numServers+numClients {
		t.Errorf("bad len: %d", len(c.LANMembersInAgentPartition()))
	}
	for _, s := range servers {
		if len(s.LANMembersInAgentPartition()) != numServers+numClients {
			t.Errorf("bad len: %d", len(s.LANMembersInAgentPartition()))
		}
	}

	// Ping each server in the list
	var pingCount int
	for range servers {
		time.Sleep(200 * time.Millisecond)
		m, s := c.router.FindLANRoute()
		ok, err := c.connPool.Ping(s.Datacenter, s.ShortName, s.Addr)
		if !ok {
			t.Errorf("Unable to ping server %v: %s", s.String(), err)
		}
		pingCount++

		// Artificially fail the server in order to rotate the server
		// list
		m.NotifyFailedServer(s)
	}

	if pingCount != numServers {
		t.Errorf("bad len: %d/%d", pingCount, numServers)
	}
}

func TestClient_RPC_TLS(t *testing.T) {
	t.Parallel()
	_, conf1 := testServerConfig(t)
	conf1.TLSConfig.InternalRPC.VerifyIncoming = true
	conf1.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()

	_, conf2 := testClientConfig(t)
	conf2.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf2)
	c1 := newClient(t, conf2)

	// Try an RPC
	var out struct{}
	if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != structs.ErrNoServers {
		t.Fatalf("err: %v", err)
	}

	// Try to join
	joinLAN(t, c1, s1)

	// Wait for joins to finish/RPC to succeed
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server LAN members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client LAN members want %d", got, want)
		}
		if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			r.Fatal("ping failed", err)
		}
	})
}

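// newClient constructs a Client from the given config using default test
// dependencies and shuts it down when the test finishes.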
func newClient(t *testing.T, config *Config) *Client {
	t.Helper()

	client, err := NewClient(config, newDefaultDeps(t, config))
	require.NoError(t, err, "failed to create client")
	t.Cleanup(func() {
		client.Shutdown()
	})
	return client
}

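// newTestResolverConfig derives a unique gRPC resolver authority from the
// test name and the given suffix.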
func newTestResolverConfig(t *testing.T, suffix string) resolver.Config {
	n := t.Name()
	s := strings.Replace(n, "/", "", -1)
	s = strings.Replace(s, "_", "", -1)
	return resolver.Config{Authority: strings.ToLower(s) + "-" + suffix}
}

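// newDefaultDeps builds the standard set of dependencies (logger, TLS
// configurator, gRPC resolver/balancer, router, and connection pools) used
// to construct test servers and clients.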
func newDefaultDeps(t *testing.T, c *Config) Deps {
	t.Helper()

	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:   c.NodeName,
		Level:  testutil.TestLogLevel,
		Output: testutil.NewLogBuffer(t),
	})

	tls, err := tlsutil.NewConfigurator(c.TLSConfig, logger)
	require.NoError(t, err, "failed to create tls configuration")

	resolverBuilder := resolver.NewServerResolverBuilder(newTestResolverConfig(t, c.NodeName+"-"+c.Datacenter))
	resolver.Register(resolverBuilder)
	t.Cleanup(func() {
		resolver.Deregister(resolverBuilder.Authority())
	})

	balancerBuilder := balancer.NewBuilder(resolverBuilder.Authority(), testutil.Logger(t))
	balancerBuilder.Register()
	t.Cleanup(balancerBuilder.Deregister)

	r := router.NewRouter(
		logger,
		c.Datacenter,
		fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter),
		grpc.NewTracker(resolverBuilder, balancerBuilder),
	)

	connPool := &pool.ConnPool{
		Server:           false,
		SrcAddr:          c.RPCSrcAddr,
		Logger:           logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
		MaxTime:          2 * time.Minute,
		MaxStreams:       4,
		TLSConfigurator:  tls,
		Datacenter:       c.Datacenter,
		DefaultQueryTime: c.DefaultQueryTime,
		MaxQueryTime:     c.MaxQueryTime,
		RPCHoldTimeout:   c.RPCHoldTimeout,
	}
	connPool.SetRPCClientTimeout(c.RPCClientTimeout)
	return Deps{
		EventPublisher:  stream.NewEventPublisher(10 * time.Second),
		Logger:          logger,
		TLSConfigurator: tls,
		Tokens:          new(token.Store),
		Router:          r,
		ConnPool:        connPool,
		GRPCConnPool: grpc.NewClientConnPool(grpc.ClientConnPoolConfig{
			Servers:               resolverBuilder,
			TLSWrapper:            grpc.TLSWrapper(tls.OutgoingRPCWrapper()),
			UseTLSForDC:           tls.UseTLS,
			DialingFromServer:     true,
			DialingFromDatacenter: c.Datacenter,
		}),
		LeaderForwarder:          resolverBuilder,
		NewRequestRecorderFunc:   middleware.NewRequestRecorder,
		GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor,
		EnterpriseDeps:           newDefaultDepsEnterprise(t, logger, c),
		XDSStreamLimiter:         limiter.NewSessionLimiter(),
	}
}

func TestClient_RPC_RateLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	_, conf1 := testServerConfig(t)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	_, conf2 := testClientConfig(t)
	conf2.RPCRateLimit = 2
	conf2.RPCMaxBurst = 2
	c1 := newClient(t, conf2)

	joinLAN(t, c1, s1)
	retry.Run(t, func(r *retry.R) {
		var out struct{}
		if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != structs.ErrRPCRateExceeded {
			r.Fatalf("err: %v", err)
		}
	})
}

func TestClient_SnapshotRPC(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Wait for the leader
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Try to join.
	joinLAN(t, c1, s1)
	testrpc.WaitForLeader(t, c1.RPC, "dc1")

	// Wait until we've got a healthy server.
	retry.Run(t, func(r *retry.R) {
		if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
			r.Fatalf("got %d servers want %d", got, want)
		}
	})

	// Take a snapshot.
	var snap bytes.Buffer
	args := structs.SnapshotRequest{
		Datacenter: "dc1",
		Op:         structs.SnapshotSave,
	}
	if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Restore a snapshot.
	args.Op = structs.SnapshotRestore
	if err := c1.SnapshotRPC(&args, &snap, nil, nil); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestClient_SnapshotRPC_RateLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	_, s1 := testServer(t)
	defer s1.Shutdown()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	_, conf1 := testClientConfig(t)
	conf1.RPCRateLimit = 2
	conf1.RPCMaxBurst = 2
	c1 := newClient(t, conf1)

	joinLAN(t, c1, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
			r.Fatalf("got %d servers want %d", got, want)
		}
	})

	retry.Run(t, func(r *retry.R) {
		var snap bytes.Buffer
		args := structs.SnapshotRequest{
			Datacenter: "dc1",
			Op:         structs.SnapshotSave,
		}
		if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != structs.ErrRPCRateExceeded {
			r.Fatalf("err: %v", err)
		}
	})
}

func TestClient_SnapshotRPC_TLS(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	_, conf1 := testServerConfig(t)
	conf1.TLSConfig.InternalRPC.VerifyIncoming = true
	conf1.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf1)
	s1, err := newServer(t, conf1)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer s1.Shutdown()

	_, conf2 := testClientConfig(t)
	conf2.TLSConfig.InternalRPC.VerifyOutgoing = true
	configureTLS(conf2)
	c1 := newClient(t, conf2)

	// Wait for the leader
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Try to join.
	joinLAN(t, c1, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client members want %d", got, want)
		}

		// Wait until we've got a healthy server.
		if got, want := c1.router.GetLANManager().NumServers(), 1; got != want {
			r.Fatalf("got %d servers want %d", got, want)
		}
	})

	// Take a snapshot.
	var snap bytes.Buffer
	args := structs.SnapshotRequest{
		Datacenter: "dc1",
		Op:         structs.SnapshotSave,
	}
	if err := c1.SnapshotRPC(&args, bytes.NewReader([]byte("")), &snap, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Restore a snapshot.
	args.Op = structs.SnapshotRestore
	if err := c1.SnapshotRPC(&args, &snap, nil, nil); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestClientServer_UserEvent(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	clientOut := make(chan serf.UserEvent, 2)
	dir1, c1 := testClientWithConfig(t, func(conf *Config) {
		conf.UserEventHandler = func(e serf.UserEvent) {
			clientOut <- e
		}
	})
	defer os.RemoveAll(dir1)
	defer c1.Shutdown()

	serverOut := make(chan serf.UserEvent, 2)
	dir2, s1 := testServerWithConfig(t, func(conf *Config) {
		conf.UserEventHandler = func(e serf.UserEvent) {
			serverOut <- e
		}
	})
	defer os.RemoveAll(dir2)
	defer s1.Shutdown()

	// Try to join
	joinLAN(t, c1, s1)

	// Wait for the leader
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Check the members
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d server LAN members want %d", got, want)
		}
		if got, want := len(c1.LANMembersInAgentPartition()), 2; got != want {
			r.Fatalf("got %d client LAN members want %d", got, want)
		}
	})

	// Fire the user event
	codec := rpcClient(t, s1)
	event := structs.EventFireRequest{
		Name:       "foo",
		Datacenter: "dc1",
		Payload:    []byte("baz"),
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for all the events
	var clientReceived, serverReceived bool
	for i := 0; i < 2; i++ {
		select {
		case e := <-clientOut:
			switch e.Name {
			case "foo":
				clientReceived = true
			default:
				t.Fatalf("Bad: %#v", e)
			}

		case e := <-serverOut:
			switch e.Name {
			case "foo":
				serverReceived = true
			default:
				t.Fatalf("Bad: %#v", e)
			}

		case <-time.After(10 * time.Second):
			t.Fatalf("timeout")
		}
	}

	if !serverReceived || !clientReceived {
		t.Fatalf("missing events")
	}
}

func TestClient_ReloadConfig(t *testing.T) {
	_, cfg := testClientConfig(t)
	cfg.RPCRateLimit = rate.Limit(500)
	cfg.RPCMaxBurst = 5000
	deps := newDefaultDeps(t, &Config{NodeName: "node1", Datacenter: "dc1"})
	c, err := NewClient(cfg, deps)
	require.NoError(t, err)
	defer c.Shutdown()

	limiter := c.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(500), limiter.Limit())
	require.Equal(t, 5000, limiter.Burst())

	rc := ReloadableConfig{
		RPCRateLimit:         1000,
		RPCMaxBurst:          10000,
		RPCMaxConnsPerClient: 0,
	}
	require.NoError(t, c.ReloadConfig(rc))

	limiter = c.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(1000), limiter.Limit())
	require.Equal(t, 10000, limiter.Burst())
}

func TestClient_ShortReconnectTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	cluster := newTestCluster(t, &testClusterConfig{
		Datacenter: "dc1",
		Servers:    1,
		Clients:    2,
		ServerConf: func(c *Config) {
			c.SerfLANConfig.ReapInterval = 50 * time.Millisecond
		},
		ClientConf: func(c *Config) {
			c.SerfLANConfig.ReapInterval = 50 * time.Millisecond
			c.AdvertiseReconnectTimeout = 100 * time.Millisecond
		},
	})

	// shutdown the client
	cluster.Clients[1].Shutdown()

	// Now wait for it to be reaped. We set the advertised reconnect
	// timeout to 100ms so we are going to check every 50 ms and allow
	// up to 10x the time in the case of slow CI.
	require.Eventually(t,
		func() bool {
			return len(cluster.Servers[0].LANMembersInAgentPartition()) == 2 &&
				len(cluster.Clients[0].LANMembersInAgentPartition()) == 2
		},
		time.Second,
		50*time.Millisecond,
		"The client node was not reaped within the allotted time")
}

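// waiter is a test RPC endpoint whose Wait method blocks for the configured
// duration before returning.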
type waiter struct {
	duration time.Duration
}

func (w *waiter) Wait(struct{}, *struct{}) error {
	time.Sleep(w.duration)
	return nil
}

func TestClient_RPC_Timeout(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}
	t.Parallel()

	_, s1 := testServerWithConfig(t)

	_, c1 := testClientWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.NodeName = uniqueNodeName(t.Name())
		c.RPCClientTimeout = 10 * time.Millisecond
		c.DefaultQueryTime = 150 * time.Millisecond
		c.MaxQueryTime = 200 * time.Millisecond
		c.RPCHoldTimeout = 50 * time.Millisecond
	})
	defer c1.Shutdown()
	joinLAN(t, c1, s1)

	retry.Run(t, func(r *retry.R) {
		var out struct{}
		if err := c1.RPC(context.Background(), "Status.Ping", struct{}{}, &out); err != nil {
			r.Fatalf("err: %v", err)
		}
	})

	require.NoError(t, s1.RegisterEndpoint("Long", &waiter{duration: 100 * time.Millisecond}))
	require.NoError(t, s1.RegisterEndpoint("Short", &waiter{duration: 5 * time.Millisecond}))

	t.Run("non-blocking query times out after RPCClientTimeout", func(t *testing.T) {
		// Requests with QueryOptions have a default timeout of
		// RPCClientTimeout (10ms) so we expect the RPC call to timeout.
		var out struct{}
		err := c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{}, &out)
		require.Error(t, err)
		require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
	})

	t.Run("non-blocking query succeeds", func(t *testing.T) {
		var out struct{}
		require.NoError(t, c1.RPC(context.Background(), "Short.Wait", &structs.NodeSpecificRequest{}, &out))
	})

	t.Run("check that deadline does not persist across calls", func(t *testing.T) {
		var out struct{}
		err := c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{}, &out)
		require.Error(t, err)
		require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
		require.NoError(t, c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{
			QueryOptions: structs.QueryOptions{
				MinQueryIndex: 1,
			},
		}, &out))
	})

	t.Run("blocking query succeeds", func(t *testing.T) {
		var out struct{}
		require.NoError(t, c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{
			QueryOptions: structs.QueryOptions{
				MinQueryIndex: 1,
			},
		}, &out))
	})

	t.Run("blocking query with MaxQueryTime succeeds", func(t *testing.T) {
		var out struct{}
		// Although we set MaxQueryTime to 100ms, the client is adding maximum
		// jitter (100ms / 16 = 6.25ms) as well as RPCHoldTimeout (50ms).
		// Client waits 156.25ms while the server waits 106.25ms (artificially
		// adds maximum jitter) so the server will always return first.
		require.NoError(t, c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{
			QueryOptions: structs.QueryOptions{
				MinQueryIndex: 1,
				MaxQueryTime:  100 * time.Millisecond,
			},
		}, &out))
	})

	// The following scenario should not occur in practice since the server
	// should be aware of RPC timeouts and always return blocking queries before
	// the client closes the connection. But this is just a hypothetical case
	// to show waiter can fail since it does not consider QueryOptions.
	t.Run("blocking query with low MaxQueryTime fails", func(t *testing.T) {
		var out struct{}
		// Although we set MaxQueryTime to 20ms, the client is adding maximum
		// jitter (20ms / 16 = 1.25ms) as well as RPCHoldTimeout (50ms).
		// Client waits 71.25ms while the server waits 106.25ms (artificially
		// adds maximum jitter) so the client will error first.
		err := c1.RPC(context.Background(), "Long.Wait", &structs.NodeSpecificRequest{
			QueryOptions: structs.QueryOptions{
				MinQueryIndex: 1,
				MaxQueryTime:  20 * time.Millisecond,
			},
		}, &out)
		require.Error(t, err)
		require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
	})
}