2013-12-06 23:43:07 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
2019-11-11 20:30:01 +00:00
|
|
|
"bytes"
|
2013-12-07 01:18:09 +00:00
|
|
|
"fmt"
|
2013-12-31 23:44:27 +00:00
|
|
|
"net"
|
2013-12-06 23:43:07 +00:00
|
|
|
"os"
|
2014-10-04 20:43:10 +00:00
|
|
|
"strings"
|
2017-06-26 08:46:20 +00:00
|
|
|
"sync/atomic"
|
2013-12-06 23:43:07 +00:00
|
|
|
"testing"
|
2013-12-10 00:05:15 +00:00
|
|
|
"time"
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2020-03-09 20:59:02 +00:00
|
|
|
"github.com/google/tcpproxy"
|
2019-11-11 20:30:01 +00:00
|
|
|
"github.com/hashicorp/consul/agent/connect/ca"
|
2020-03-09 20:59:02 +00:00
|
|
|
"github.com/hashicorp/consul/ipaddr"
|
2019-11-11 20:30:01 +00:00
|
|
|
|
2018-05-10 16:04:33 +00:00
|
|
|
"github.com/hashicorp/consul/agent/connect"
|
2017-07-06 10:48:37 +00:00
|
|
|
"github.com/hashicorp/consul/agent/metadata"
|
2018-05-10 16:04:33 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-08-03 22:39:31 +00:00
|
|
|
"github.com/hashicorp/consul/agent/token"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/freeport"
|
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2019-04-25 16:26:33 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2019-02-26 15:52:07 +00:00
|
|
|
"github.com/hashicorp/consul/tlsutil"
|
2017-02-22 20:53:32 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2017-02-24 04:32:13 +00:00
|
|
|
"github.com/hashicorp/go-uuid"
|
2019-06-13 09:26:27 +00:00
|
|
|
"golang.org/x/time/rate"
|
2019-03-04 14:19:35 +00:00
|
|
|
|
|
|
|
"github.com/stretchr/testify/require"
|
2013-12-06 23:43:07 +00:00
|
|
|
)
|
|
|
|
|
2020-01-13 20:51:40 +00:00
|
|
|
const (
	// TestDefaultMasterToken is the fixed ACL master token shared by all
	// ACL-enabled test servers (see testServerACLConfig); tests use it both
	// as the master token and as the replication token.
	TestDefaultMasterToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a"
)
|
|
|
|
|
|
|
|
// testServerACLConfig wraps another arbitrary Config altering callback
|
|
|
|
// to setup some common ACL configurations. A new callback func will
|
|
|
|
// be returned that has the original callback invoked after setting
|
|
|
|
// up all of the ACL configurations (so they can still be overridden)
|
|
|
|
func testServerACLConfig(cb func(*Config)) func(*Config) {
|
|
|
|
return func(c *Config) {
|
|
|
|
c.ACLDatacenter = "dc1"
|
|
|
|
c.ACLsEnabled = true
|
|
|
|
c.ACLMasterToken = TestDefaultMasterToken
|
|
|
|
c.ACLDefaultPolicy = "deny"
|
2020-02-10 15:40:44 +00:00
|
|
|
c.ACLEnforceVersion8 = true
|
2020-01-13 20:51:40 +00:00
|
|
|
|
|
|
|
if cb != nil {
|
|
|
|
cb(c)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-07 21:36:32 +00:00
|
|
|
func configureTLS(config *Config) {
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
config.CAFile = "../../test/ca/root.cer"
|
|
|
|
config.CertFile = "../../test/key/ourdomain.cer"
|
|
|
|
config.KeyFile = "../../test/key/ourdomain.key"
|
2014-04-07 21:36:32 +00:00
|
|
|
}
|
|
|
|
|
2017-06-26 12:23:09 +00:00
|
|
|
// id is a process-wide counter used to make generated node names unique.
var id int64

// uniqueNodeName derives a unique node name from a test name: path
// separators (present in subtest names) are replaced with underscores and
// a monotonically increasing suffix is appended so that concurrently
// running tests never collide.
func uniqueNodeName(name string) string {
	n := atomic.AddInt64(&id, 1)
	sanitized := strings.ReplaceAll(name, "/", "_")
	return fmt.Sprintf("%s-node-%d", sanitized, n)
}
|
|
|
|
|
2019-12-18 18:45:27 +00:00
|
|
|
// This will find the leader of a list of servers and verify that leader establishment has completed
|
|
|
|
func waitForLeaderEstablishment(t *testing.T, servers ...*Server) {
|
|
|
|
t.Helper()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
hasLeader := false
|
|
|
|
for _, srv := range servers {
|
|
|
|
if srv.IsLeader() {
|
|
|
|
hasLeader = true
|
|
|
|
require.True(r, srv.isReadyForConsistentReads(), "Leader %s hasn't finished establishing leadership yet", srv.config.NodeName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.True(r, hasLeader, "Cluster has not elected a leader yet")
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2017-06-26 12:23:09 +00:00
|
|
|
func testServerConfig(t *testing.T) (string, *Config) {
|
2017-05-12 13:41:13 +00:00
|
|
|
dir := testutil.TempDir(t, "consul")
|
2013-12-07 01:18:09 +00:00
|
|
|
config := DefaultConfig()
|
2014-05-26 22:47:47 +00:00
|
|
|
|
2019-08-27 21:16:41 +00:00
|
|
|
ports := freeport.MustTake(3)
|
|
|
|
|
|
|
|
returnPortsFn := func() {
|
|
|
|
// The method of plumbing this into the server shutdown hook doesn't
|
|
|
|
// cover all exit points, so we insulate this against multiple
|
|
|
|
// invocations and then it's safe to call it a bunch of times.
|
|
|
|
freeport.Return(ports)
|
|
|
|
config.NotifyShutdown = nil // self-erasing
|
|
|
|
}
|
|
|
|
config.NotifyShutdown = returnPortsFn
|
|
|
|
|
2017-06-26 12:23:09 +00:00
|
|
|
config.NodeName = uniqueNodeName(t.Name())
|
2014-04-07 20:13:23 +00:00
|
|
|
config.Bootstrap = true
|
|
|
|
config.Datacenter = "dc1"
|
2013-12-07 01:18:09 +00:00
|
|
|
config.DataDir = dir
|
2019-02-01 15:21:54 +00:00
|
|
|
config.LogOutput = testutil.TestWriter(t)
|
2017-06-25 19:36:03 +00:00
|
|
|
|
|
|
|
// bind the rpc server to a random port. config.RPCAdvertise will be
|
|
|
|
// set to the listen address unless it was set in the configuration.
|
|
|
|
// In that case get the address from srv.Listener.Addr().
|
2017-09-25 18:40:42 +00:00
|
|
|
config.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: ports[0]}
|
2017-06-25 19:36:03 +00:00
|
|
|
|
2017-02-22 20:53:32 +00:00
|
|
|
nodeID, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
2019-08-27 21:16:41 +00:00
|
|
|
returnPortsFn()
|
2017-02-22 20:53:32 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
config.NodeID = types.NodeID(nodeID)
|
2017-06-25 19:36:03 +00:00
|
|
|
|
|
|
|
// set the memberlist bind port to 0 to bind to a random port.
|
|
|
|
// memberlist will update the value of BindPort after bind
|
|
|
|
// to the actual value.
|
2013-12-07 01:18:09 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
|
2017-09-25 18:40:42 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.BindPort = ports[1]
|
|
|
|
config.SerfLANConfig.MemberlistConfig.AdvertisePort = ports[1]
|
2014-01-10 01:46:33 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.SuspicionMult = 2
|
|
|
|
config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
|
|
|
|
config.SerfLANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
|
2013-12-11 22:57:40 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
|
2019-05-15 18:59:33 +00:00
|
|
|
config.SerfLANConfig.MemberlistConfig.DeadNodeReclaimTime = 100 * time.Millisecond
|
2013-12-11 22:57:40 +00:00
|
|
|
|
2013-12-07 01:18:09 +00:00
|
|
|
config.SerfWANConfig.MemberlistConfig.BindAddr = "127.0.0.1"
|
2017-09-25 18:40:42 +00:00
|
|
|
config.SerfWANConfig.MemberlistConfig.BindPort = ports[2]
|
|
|
|
config.SerfWANConfig.MemberlistConfig.AdvertisePort = ports[2]
|
2014-01-10 01:46:33 +00:00
|
|
|
config.SerfWANConfig.MemberlistConfig.SuspicionMult = 2
|
|
|
|
config.SerfWANConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
|
|
|
|
config.SerfWANConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
|
2013-12-11 22:57:40 +00:00
|
|
|
config.SerfWANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
|
2019-05-15 18:59:33 +00:00
|
|
|
config.SerfWANConfig.MemberlistConfig.DeadNodeReclaimTime = 100 * time.Millisecond
|
2013-12-11 22:57:40 +00:00
|
|
|
|
2017-06-26 14:29:49 +00:00
|
|
|
config.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond
|
|
|
|
config.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond
|
|
|
|
config.RaftConfig.ElectionTimeout = 200 * time.Millisecond
|
2013-12-07 01:18:09 +00:00
|
|
|
|
2017-06-26 14:29:49 +00:00
|
|
|
config.ReconcileInterval = 300 * time.Millisecond
|
2015-05-14 01:22:34 +00:00
|
|
|
|
2017-03-21 23:36:44 +00:00
|
|
|
config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
|
|
|
|
config.ServerHealthInterval = 50 * time.Millisecond
|
|
|
|
config.AutopilotInterval = 100 * time.Millisecond
|
|
|
|
|
2018-11-12 23:38:53 +00:00
|
|
|
config.Build = "1.4.0"
|
2017-03-21 23:36:44 +00:00
|
|
|
|
2015-06-30 21:25:40 +00:00
|
|
|
config.CoordinateUpdatePeriod = 100 * time.Millisecond
|
2017-10-10 22:19:50 +00:00
|
|
|
config.LeaveDrainTime = 1 * time.Millisecond
|
|
|
|
|
|
|
|
// TODO (slackpad) - We should be able to run all tests w/o this, but it
|
|
|
|
// looks like several depend on it.
|
|
|
|
config.RPCHoldTimeout = 5 * time.Second
|
|
|
|
|
2018-04-27 06:02:18 +00:00
|
|
|
config.ConnectEnabled = true
|
2018-05-10 16:04:33 +00:00
|
|
|
config.CAConfig = &structs.CAConfiguration{
|
|
|
|
ClusterID: connect.TestClusterID,
|
|
|
|
Provider: structs.ConsulCAProvider,
|
2018-05-10 16:27:42 +00:00
|
|
|
Config: map[string]interface{}{
|
2020-01-17 22:27:13 +00:00
|
|
|
"PrivateKey": "",
|
|
|
|
"RootCert": "",
|
|
|
|
"RotationPeriod": "2160h",
|
|
|
|
"LeafCertTTL": "72h",
|
2020-02-10 23:05:49 +00:00
|
|
|
"IntermediateCertTTL": "288h",
|
2018-05-10 16:27:42 +00:00
|
|
|
},
|
2018-05-10 16:04:33 +00:00
|
|
|
}
|
2018-04-27 06:02:18 +00:00
|
|
|
|
2019-08-27 21:16:41 +00:00
|
|
|
config.NotifyShutdown = returnPortsFn
|
|
|
|
|
2014-04-07 20:13:23 +00:00
|
|
|
return dir, config
|
|
|
|
}
|
2014-01-10 19:07:29 +00:00
|
|
|
|
2014-04-07 20:13:23 +00:00
|
|
|
func testServer(t *testing.T) (string, *Server) {
|
2017-06-26 08:44:36 +00:00
|
|
|
return testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.Bootstrap = true
|
|
|
|
})
|
2014-04-07 20:13:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func testServerDC(t *testing.T, dc string) (string, *Server) {
|
2017-06-26 08:44:36 +00:00
|
|
|
return testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = dc
|
|
|
|
c.Bootstrap = true
|
|
|
|
})
|
2014-04-07 20:13:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Server) {
|
2017-06-26 08:44:36 +00:00
|
|
|
return testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = dc
|
|
|
|
c.Bootstrap = bootstrap
|
|
|
|
})
|
2013-12-07 01:18:09 +00:00
|
|
|
}
|
|
|
|
|
2014-06-16 21:36:12 +00:00
|
|
|
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
|
2017-06-26 08:44:36 +00:00
|
|
|
return testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = dc
|
|
|
|
c.Bootstrap = false
|
|
|
|
c.BootstrapExpect = expect
|
|
|
|
})
|
2014-06-16 21:36:12 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 00:41:36 +00:00
|
|
|
func testServerDCExpectNonVoter(t *testing.T, dc string, expect int) (string, *Server) {
|
|
|
|
return testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Datacenter = dc
|
|
|
|
c.Bootstrap = false
|
|
|
|
c.BootstrapExpect = expect
|
|
|
|
c.NonVoter = true
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2017-06-26 08:46:20 +00:00
|
|
|
func testServerWithConfig(t *testing.T, cb func(*Config)) (string, *Server) {
|
2019-07-24 14:41:00 +00:00
|
|
|
var dir string
|
|
|
|
var config *Config
|
2019-07-12 15:52:26 +00:00
|
|
|
var srv *Server
|
|
|
|
var err error
|
|
|
|
|
|
|
|
// Retry added to avoid cases where bind addr is already in use
|
|
|
|
retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
|
2019-07-24 14:41:00 +00:00
|
|
|
dir, config = testServerConfig(t)
|
|
|
|
if cb != nil {
|
|
|
|
cb(config)
|
|
|
|
}
|
|
|
|
|
2019-07-12 15:52:26 +00:00
|
|
|
srv, err = newServer(config)
|
|
|
|
if err != nil {
|
2019-08-27 21:16:41 +00:00
|
|
|
config.NotifyShutdown()
|
2019-07-12 15:52:26 +00:00
|
|
|
os.RemoveAll(dir)
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2017-06-26 08:46:20 +00:00
|
|
|
return dir, srv
|
2014-08-11 21:01:45 +00:00
|
|
|
}
|
|
|
|
|
2020-01-13 20:51:40 +00:00
|
|
|
// cb is a function that can alter the test servers configuration prior to the server starting.
|
|
|
|
func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToken bool) (string, *Server) {
|
|
|
|
dir, srv := testServerWithConfig(t, testServerACLConfig(cb))
|
|
|
|
|
|
|
|
if initReplicationToken {
|
|
|
|
// setup some tokens here so we get less warnings in the logs
|
|
|
|
srv.tokens.UpdateReplicationToken(TestDefaultMasterToken, token.TokenSourceConfig)
|
|
|
|
}
|
|
|
|
return dir, srv
|
|
|
|
}
|
|
|
|
|
2017-06-25 19:36:03 +00:00
|
|
|
// newServer constructs and starts a *Server from c, blocking until the
// server's RPC listener is up, and then copies the real listener address
// back into c.RPCAddr so tests can dial it.
func newServer(c *Config) (*Server, error) {
	// chain server up notification: close `up` once the server is
	// listening, then forward to any NotifyListen the caller installed.
	oldNotify := c.NotifyListen
	up := make(chan struct{})
	c.NotifyListen = func() {
		close(up)
		if oldNotify != nil {
			oldNotify()
		}
	}

	// start server
	w := c.LogOutput
	if w == nil {
		// Fall back to stderr when the test didn't wire a log writer.
		w = os.Stderr
	}
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:   c.NodeName,
		Level:  hclog.Debug,
		Output: w,
	})
	tlsConf, err := tlsutil.NewConfigurator(c.ToTLSUtilConfig(), logger)
	if err != nil {
		return nil, err
	}
	srv, err := NewServerLogger(c, logger, new(token.Store), tlsConf)
	if err != nil {
		return nil, err
	}

	// wait until after listen; NotifyListen (above) closes `up`.
	<-up

	// get the real address
	//
	// the server already sets the RPCAdvertise address
	// if it wasn't configured since it needs it for
	// some initialization
	//
	// todo(fs): setting RPCAddr should probably be guarded
	// todo(fs): but for now it is a shortcut to avoid fixing
	// todo(fs): tests which depend on that value. They should
	// todo(fs): just get the listener address instead.
	c.RPCAddr = srv.Listener.Addr().(*net.TCPAddr)
	return srv, nil
}
|
|
|
|
|
2013-12-06 23:43:07 +00:00
|
|
|
func TestServer_StartStop(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-03-24 03:04:23 +00:00
|
|
|
// Start up a server and then stop it.
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
if err := s1.Shutdown(); err != nil {
|
2013-12-06 23:43:07 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-03-24 03:04:23 +00:00
|
|
|
// Shut down again, which should be idempotent.
|
|
|
|
if err := s1.Shutdown(); err != nil {
|
2013-12-06 23:43:07 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2013-12-07 01:18:09 +00:00
|
|
|
|
2019-10-17 15:57:17 +00:00
|
|
|
// TestServer_fixupACLDatacenter verifies that when a server configures only
// PrimaryDatacenter (and never ACLDatacenter), ACLDatacenter is back-filled
// to match PrimaryDatacenter on both the primary and the secondary server.
func TestServer_fixupACLDatacenter(t *testing.T) {
	t.Parallel()

	// Primary datacenter "aye"; note ACLDatacenter is deliberately unset.
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "aye"
		c.PrimaryDatacenter = "aye"
		c.ACLsEnabled = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// Secondary datacenter "bee" pointing at "aye" as its primary.
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "bee"
		c.PrimaryDatacenter = "aye"
		c.ACLsEnabled = true
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got != want {
			r.Fatalf("got %d s1 WAN members want %d", got, want)
		}
		if got, want := len(s2.WANMembers()), 2; got != want {
			r.Fatalf("got %d s2 WAN members want %d", got, want)
		}
	})

	testrpc.WaitForLeader(t, s1.RPC, "aye")
	testrpc.WaitForLeader(t, s2.RPC, "bee")

	// ACLDatacenter should now mirror PrimaryDatacenter on both servers.
	require.Equal(t, "aye", s1.config.Datacenter)
	require.Equal(t, "aye", s1.config.ACLDatacenter)
	require.Equal(t, "aye", s1.config.PrimaryDatacenter)

	require.Equal(t, "bee", s2.config.Datacenter)
	require.Equal(t, "aye", s2.config.ACLDatacenter)
	require.Equal(t, "aye", s2.config.PrimaryDatacenter)
}
|
|
|
|
|
2013-12-07 01:18:09 +00:00
|
|
|
func TestServer_JoinLAN(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-07 01:18:09 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, s2 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s2, s1)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(s1.LANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s1 LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s2.LANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s2 LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2013-12-07 01:18:09 +00:00
|
|
|
}
|
|
|
|
|
2019-03-04 14:19:35 +00:00
|
|
|
// TestServer_LANReap verifies that when a LAN member dies, serf eventually
// reaps it and the dead server is also removed from the serverLookup
// routing table. Reap/reconnect timers are shortened so this happens fast.
func TestServer_LANReap(t *testing.T) {
	t.Parallel()

	// Shared timing overrides so failed members are reaped quickly.
	configureServer := func(c *Config) {
		c.SerfFloodInterval = 100 * time.Millisecond
		c.SerfLANConfig.ReconnectTimeout = 250 * time.Millisecond
		c.SerfLANConfig.TombstoneTimeout = 250 * time.Millisecond
		c.SerfLANConfig.ReapInterval = 300 * time.Millisecond
	}

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = true
		configureServer(c)
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// s2 intentionally has no deferred Shutdown: it is shut down mid-test
	// so the survivors can observe it being reaped.
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = false
		configureServer(c)
	})
	defer os.RemoveAll(dir2)

	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = false
		configureServer(c)
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s2.RPC, "dc1")
	testrpc.WaitForLeader(t, s3.RPC, "dc1")

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.LANMembers(), 3)
		require.Len(r, s2.LANMembers(), 3)
		require.Len(r, s3.LANMembers(), 3)
	})

	// Check the server lookup table has all three servers
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.serverLookup.Servers(), 3)
		require.Len(r, s2.serverLookup.Servers(), 3)
		require.Len(r, s3.serverLookup.Servers(), 3)
	})

	// shutdown the second server; the survivors should reap it
	s2.Shutdown()

	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.LANMembers(), 2)
		servers := s1.serverLookup.Servers()
		require.Len(r, servers, 2)
		// require.Equal(r, s1.config.NodeName, servers[0].Name)
	})
}
|
|
|
|
|
2013-12-07 01:18:09 +00:00
|
|
|
func TestServer_JoinWAN(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-07 01:18:09 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2013-12-12 00:24:34 +00:00
|
|
|
dir2, s2 := testServerDC(t, "dc2")
|
2013-12-07 01:18:09 +00:00
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinWAN(t, s2, s1)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(s1.WANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s1 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s2.WANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s2 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2013-12-12 00:24:34 +00:00
|
|
|
|
2017-03-14 05:56:24 +00:00
|
|
|
// Check the router has both
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-09 04:57:06 +00:00
|
|
|
if got, want := len(s1.router.GetDatacenters()), 2; got != want {
|
|
|
|
r.Fatalf("got %d routes want %d", got, want)
|
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(s2.router.GetDatacenters()), 2; got != want {
|
2017-05-09 04:57:06 +00:00
|
|
|
r.Fatalf("got %d datacenters want %d", got, want)
|
2017-04-29 16:34:02 +00:00
|
|
|
}
|
|
|
|
})
|
2013-12-07 01:18:09 +00:00
|
|
|
}
|
2013-12-10 00:05:15 +00:00
|
|
|
|
2019-03-04 14:19:35 +00:00
|
|
|
// TestServer_WANReap verifies that when a WAN member's server dies, serf
// reaps it from the WAN pool and its datacenter disappears from the
// router. WAN reap/reconnect timers are shortened so this happens fast.
func TestServer_WANReap(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc1"
		c.Bootstrap = true
		// Aggressive WAN serf timing so the dead member is reaped quickly.
		c.SerfFloodInterval = 100 * time.Millisecond
		c.SerfWANConfig.ReconnectTimeout = 250 * time.Millisecond
		c.SerfWANConfig.TombstoneTimeout = 250 * time.Millisecond
		c.SerfWANConfig.ReapInterval = 500 * time.Millisecond
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	// s2 intentionally has no deferred Shutdown: it is shut down mid-test
	// so s1 can observe it being reaped.
	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)

	// Try to join
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.WANMembers(), 2)
		require.Len(r, s2.WANMembers(), 2)
	})

	// Check the router has both
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.router.GetDatacenters(), 2)
		require.Len(r, s2.router.GetDatacenters(), 2)
	})

	// shutdown the second dc
	s2.Shutdown()

	// dc2 should be reaped from both the WAN pool and the router.
	retry.Run(t, func(r *retry.R) {
		require.Len(r, s1.WANMembers(), 1)
		datacenters := s1.router.GetDatacenters()
		require.Len(r, datacenters, 1)
		require.Equal(r, "dc1", datacenters[0])
	})

}
|
|
|
|
|
2017-03-15 19:26:54 +00:00
|
|
|
func TestServer_JoinWAN_Flood(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-03-15 19:26:54 +00:00
|
|
|
// Set up two servers in a WAN.
|
2017-06-26 09:05:44 +00:00
|
|
|
dir1, s1 := testServerDCBootstrap(t, "dc1", true)
|
2017-03-15 19:26:54 +00:00
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2017-06-26 09:05:44 +00:00
|
|
|
dir2, s2 := testServerDCBootstrap(t, "dc2", true)
|
2017-03-15 19:26:54 +00:00
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
2017-05-05 10:29:49 +00:00
|
|
|
joinWAN(t, s2, s1)
|
2017-03-15 19:26:54 +00:00
|
|
|
|
2017-04-29 16:34:02 +00:00
|
|
|
for _, s := range []*Server{s1, s2} {
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(s.WANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
2017-03-20 23:23:40 +00:00
|
|
|
}
|
2017-03-15 19:26:54 +00:00
|
|
|
|
2017-06-26 09:05:44 +00:00
|
|
|
dir3, s3 := testServerDCBootstrap(t, "dc1", false)
|
2017-03-15 19:26:54 +00:00
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
|
|
|
// Do just a LAN join for the new server and make sure it
|
|
|
|
// shows up in the WAN.
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s3, s1)
|
2017-03-15 19:26:54 +00:00
|
|
|
|
2017-04-29 16:34:02 +00:00
|
|
|
for _, s := range []*Server{s1, s2, s3} {
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if got, want := len(s.WANMembers()), 3; got != want {
|
2017-06-26 09:05:44 +00:00
|
|
|
r.Fatalf("got %d WAN members for %s want %d", got, s.config.NodeName, want)
|
2017-04-29 16:34:02 +00:00
|
|
|
}
|
|
|
|
})
|
2017-03-20 23:23:40 +00:00
|
|
|
}
|
2017-03-15 19:26:54 +00:00
|
|
|
}
|
|
|
|
|
2020-03-09 20:59:02 +00:00
|
|
|
// This is a mirror of a similar test in agent/agent_test.go
|
|
|
|
func TestServer_JoinWAN_viaMeshGateway(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
gwPort := freeport.MustTake(1)
|
|
|
|
defer freeport.Return(gwPort)
|
|
|
|
gwAddr := ipaddr.FormatAddressPort("127.0.0.1", gwPort[0])
|
|
|
|
|
|
|
|
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Domain = "consul"
|
|
|
|
c.NodeName = "bob"
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.PrimaryDatacenter = "dc1"
|
|
|
|
c.Bootstrap = true
|
|
|
|
// tls
|
|
|
|
c.CAFile = "../../test/hostname/CertAuth.crt"
|
|
|
|
c.CertFile = "../../test/hostname/Bob.crt"
|
|
|
|
c.KeyFile = "../../test/hostname/Bob.key"
|
|
|
|
c.VerifyIncoming = true
|
|
|
|
c.VerifyOutgoing = true
|
|
|
|
c.VerifyServerHostname = true
|
|
|
|
// wanfed
|
|
|
|
c.ConnectMeshGatewayWANFederationEnabled = true
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Domain = "consul"
|
|
|
|
c.NodeName = "betty"
|
|
|
|
c.Datacenter = "dc2"
|
|
|
|
c.PrimaryDatacenter = "dc1"
|
|
|
|
c.Bootstrap = true
|
|
|
|
// tls
|
|
|
|
c.CAFile = "../../test/hostname/CertAuth.crt"
|
|
|
|
c.CertFile = "../../test/hostname/Betty.crt"
|
|
|
|
c.KeyFile = "../../test/hostname/Betty.key"
|
|
|
|
c.VerifyIncoming = true
|
|
|
|
c.VerifyOutgoing = true
|
|
|
|
c.VerifyServerHostname = true
|
|
|
|
// wanfed
|
|
|
|
c.ConnectMeshGatewayWANFederationEnabled = true
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
dir3, s3 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Domain = "consul"
|
|
|
|
c.NodeName = "bonnie"
|
|
|
|
c.Datacenter = "dc3"
|
|
|
|
c.PrimaryDatacenter = "dc1"
|
|
|
|
c.Bootstrap = true
|
|
|
|
// tls
|
|
|
|
c.CAFile = "../../test/hostname/CertAuth.crt"
|
|
|
|
c.CertFile = "../../test/hostname/Bonnie.crt"
|
|
|
|
c.KeyFile = "../../test/hostname/Bonnie.key"
|
|
|
|
c.VerifyIncoming = true
|
|
|
|
c.VerifyOutgoing = true
|
|
|
|
c.VerifyServerHostname = true
|
|
|
|
// wanfed
|
|
|
|
c.ConnectMeshGatewayWANFederationEnabled = true
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
|
|
|
// We'll use the same gateway for all datacenters since it doesn't care.
|
|
|
|
var p tcpproxy.Proxy
|
|
|
|
p.AddSNIRoute(gwAddr, "bob.server.dc1.consul", tcpproxy.To(s1.config.RPCAddr.String()))
|
|
|
|
p.AddSNIRoute(gwAddr, "betty.server.dc2.consul", tcpproxy.To(s2.config.RPCAddr.String()))
|
|
|
|
p.AddSNIRoute(gwAddr, "bonnie.server.dc3.consul", tcpproxy.To(s3.config.RPCAddr.String()))
|
|
|
|
p.AddStopACMESearch(gwAddr)
|
|
|
|
require.NoError(t, p.Start())
|
|
|
|
defer func() {
|
|
|
|
p.Close()
|
|
|
|
p.Wait()
|
|
|
|
}()
|
|
|
|
|
|
|
|
t.Logf("routing %s => %s", "bob.server.dc1.consul", s1.config.RPCAddr.String())
|
|
|
|
t.Logf("routing %s => %s", "betty.server.dc2.consul", s2.config.RPCAddr.String())
|
|
|
|
t.Logf("routing %s => %s", "bonnie.server.dc3.consul", s3.config.RPCAddr.String())
|
|
|
|
|
|
|
|
// Register this into the catalog in dc1.
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bob",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindMeshGateway,
|
|
|
|
ID: "mesh-gateway",
|
|
|
|
Service: "mesh-gateway",
|
|
|
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
|
|
|
Port: gwPort[0],
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, s1.RPC("Catalog.Register", &arg, &out))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for it to make it into the gateway locator.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc1"))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Seed the secondaries with the address of the primary and wait for that to
|
|
|
|
// be in their locators.
|
|
|
|
s2.RefreshPrimaryGatewayFallbackAddresses([]string{gwAddr})
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc1"))
|
|
|
|
})
|
|
|
|
s3.RefreshPrimaryGatewayFallbackAddresses([]string{gwAddr})
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc1"))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Try to join from secondary to primary. We can't use joinWAN() because we
|
|
|
|
// are simulating proper bootstrapping and if ACLs were on we would have to
|
|
|
|
// delay gateway registration in the secondary until after one directional
|
|
|
|
// join. So this way we explicitly join secondary-to-primary as a standalone
|
|
|
|
// operation and follow it up later with a full join.
|
|
|
|
_, err := s2.JoinWAN([]string{joinAddrWAN(s1)})
|
|
|
|
require.NoError(t, err)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := len(s2.WANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s2 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
_, err = s3.JoinWAN([]string{joinAddrWAN(s1)})
|
|
|
|
require.NoError(t, err)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := len(s3.WANMembers()), 3; got != want {
|
|
|
|
r.Fatalf("got %d s3 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Now we can register this into the catalog in dc2 and dc3.
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc2",
|
|
|
|
Node: "betty",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindMeshGateway,
|
|
|
|
ID: "mesh-gateway",
|
|
|
|
Service: "mesh-gateway",
|
|
|
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
|
|
|
Port: gwPort[0],
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, s2.RPC("Catalog.Register", &arg, &out))
|
|
|
|
}
|
|
|
|
{
|
|
|
|
arg := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc3",
|
|
|
|
Node: "bonnie",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Kind: structs.ServiceKindMeshGateway,
|
|
|
|
ID: "mesh-gateway",
|
|
|
|
Service: "mesh-gateway",
|
|
|
|
Meta: map[string]string{structs.MetaWANFederationKey: "1"},
|
|
|
|
Port: gwPort[0],
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, s3.RPC("Catalog.Register", &arg, &out))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for it to make it into the gateway locator in dc2 and then for
|
|
|
|
// AE to carry it back to the primary
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc2"))
|
|
|
|
require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc2"))
|
|
|
|
require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc2"))
|
|
|
|
|
|
|
|
require.NotEmpty(r, s3.gatewayLocator.PickGateway("dc3"))
|
|
|
|
require.NotEmpty(r, s2.gatewayLocator.PickGateway("dc3"))
|
|
|
|
require.NotEmpty(r, s1.gatewayLocator.PickGateway("dc3"))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Try to join again using the standard verification method now that
|
|
|
|
// all of the plumbing is in place.
|
|
|
|
joinWAN(t, s2, s1)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := len(s1.WANMembers()), 3; got != want {
|
|
|
|
r.Fatalf("got %d s1 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s2.WANMembers()), 3; got != want {
|
|
|
|
r.Fatalf("got %d s2 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Check the router has all of them
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := len(s1.router.GetDatacenters()), 3; got != want {
|
|
|
|
r.Fatalf("got %d routes want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s2.router.GetDatacenters()), 3; got != want {
|
|
|
|
r.Fatalf("got %d datacenters want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s3.router.GetDatacenters()), 3; got != want {
|
|
|
|
r.Fatalf("got %d datacenters want %d", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// Ensure we can do some trivial RPC in all directions.
|
|
|
|
servers := map[string]*Server{"dc1": s1, "dc2": s2, "dc3": s3}
|
|
|
|
names := map[string]string{"dc1": "bob", "dc2": "betty", "dc3": "bonnie"}
|
|
|
|
for _, srcDC := range []string{"dc1", "dc2", "dc3"} {
|
|
|
|
srv := servers[srcDC]
|
|
|
|
for _, dstDC := range []string{"dc1", "dc2", "dc3"} {
|
|
|
|
if srcDC == dstDC {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
t.Run(srcDC+" to "+dstDC, func(t *testing.T) {
|
|
|
|
arg := structs.DCSpecificRequest{
|
|
|
|
Datacenter: dstDC,
|
|
|
|
}
|
|
|
|
var out structs.IndexedNodes
|
|
|
|
require.NoError(t, srv.RPC("Catalog.ListNodes", &arg, &out))
|
|
|
|
require.Len(t, out.Nodes, 1)
|
|
|
|
node := out.Nodes[0]
|
|
|
|
require.Equal(t, dstDC, node.Datacenter)
|
|
|
|
require.Equal(t, names[dstDC], node.Node)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-28 09:47:28 +00:00
|
|
|
func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-06-27 22:04:17 +00:00
|
|
|
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = t.Name() + "-s1"
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.Bootstrap = true
|
|
|
|
c.SerfFloodInterval = 100 * time.Millisecond
|
|
|
|
})
|
2015-03-28 09:47:28 +00:00
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2017-06-27 22:04:17 +00:00
|
|
|
s2Name := t.Name() + "-s2"
|
2015-03-28 09:47:28 +00:00
|
|
|
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
2017-06-27 22:04:17 +00:00
|
|
|
c.NodeName = s2Name
|
2015-03-28 09:47:28 +00:00
|
|
|
c.Datacenter = "dc2"
|
2017-06-27 22:04:17 +00:00
|
|
|
c.Bootstrap = false
|
2015-03-28 09:47:28 +00:00
|
|
|
// This wan address will be expected to be seen on s1
|
|
|
|
c.SerfWANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.2"
|
|
|
|
// This lan address will be expected to be seen on s3
|
|
|
|
c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.3"
|
2017-06-27 22:04:17 +00:00
|
|
|
c.SerfFloodInterval = 100 * time.Millisecond
|
2015-03-28 09:47:28 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
2017-06-27 22:04:17 +00:00
|
|
|
dir3, s3 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = t.Name() + "-s3"
|
|
|
|
c.Datacenter = "dc2"
|
|
|
|
c.Bootstrap = true
|
|
|
|
c.SerfFloodInterval = 100 * time.Millisecond
|
|
|
|
})
|
2015-03-28 09:47:28 +00:00
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
|
|
|
// Join s2 to s1 on wan
|
2017-05-05 10:29:49 +00:00
|
|
|
joinWAN(t, s2, s1)
|
2015-03-28 09:47:28 +00:00
|
|
|
|
|
|
|
// Join s3 to s2 on lan
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s3, s2)
|
2017-07-05 04:38:42 +00:00
|
|
|
|
|
|
|
// We rely on flood joining to fill across the LAN, so we expect s3 to
|
|
|
|
// show up on the WAN as well, even though it's not explicitly joined.
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-07-04 18:02:01 +00:00
|
|
|
if got, want := len(s1.WANMembers()), 3; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got %d s1 WAN members want %d", got, want)
|
|
|
|
}
|
2017-07-04 18:02:01 +00:00
|
|
|
if got, want := len(s2.WANMembers()), 3; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got %d s2 WAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s2.LANMembers()), 2; got != want {
|
|
|
|
r.Fatalf("got %d s2 LAN members want %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := len(s3.LANMembers()), 2; got != want {
|
2017-06-26 12:22:09 +00:00
|
|
|
r.Fatalf("got %d s3 LAN members want %d", got, want)
|
2017-04-29 16:34:02 +00:00
|
|
|
}
|
|
|
|
})
|
2015-03-28 09:47:28 +00:00
|
|
|
|
2017-03-14 05:56:24 +00:00
|
|
|
// Check the router has both
|
2017-05-09 04:57:06 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if len(s1.router.GetDatacenters()) != 2 {
|
|
|
|
r.Fatalf("remote consul missing")
|
|
|
|
}
|
|
|
|
if len(s2.router.GetDatacenters()) != 2 {
|
|
|
|
r.Fatalf("remote consul missing")
|
|
|
|
}
|
2017-08-30 17:31:36 +00:00
|
|
|
if len(s2.serverLookup.Servers()) != 2 {
|
2017-05-09 04:57:06 +00:00
|
|
|
r.Fatalf("local consul fellow s3 for s2 missing")
|
|
|
|
}
|
|
|
|
})
|
2015-03-28 09:47:28 +00:00
|
|
|
|
|
|
|
// Get and check the wan address of s2 from s1
|
|
|
|
var s2WanAddr string
|
|
|
|
for _, member := range s1.WANMembers() {
|
2017-06-27 22:04:17 +00:00
|
|
|
if member.Name == s2Name+".dc2" {
|
2015-03-28 09:47:28 +00:00
|
|
|
s2WanAddr = member.Addr.String()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s2WanAddr != "127.0.0.2" {
|
|
|
|
t.Fatalf("s1 sees s2 on a wrong address: %s, expecting: %s", s2WanAddr, "127.0.0.2")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get and check the lan address of s2 from s3
|
|
|
|
var s2LanAddr string
|
|
|
|
for _, lanmember := range s3.LANMembers() {
|
2017-06-27 22:04:17 +00:00
|
|
|
if lanmember.Name == s2Name {
|
2015-03-28 09:47:28 +00:00
|
|
|
s2LanAddr = lanmember.Addr.String()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if s2LanAddr != "127.0.0.3" {
|
|
|
|
t.Fatalf("s3 sees s2 on a wrong address: %s, expecting: %s", s2LanAddr, "127.0.0.3")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-21 00:30:56 +00:00
|
|
|
func TestServer_LeaveLeader(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-10 00:05:15 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2014-01-30 21:13:29 +00:00
|
|
|
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
|
2013-12-10 00:05:15 +00:00
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
2017-10-31 20:16:56 +00:00
|
|
|
dir3, s3 := testServerDCBootstrap(t, "dc1", false)
|
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
2013-12-10 00:05:15 +00:00
|
|
|
|
2017-06-26 12:35:34 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2017-10-31 20:16:56 +00:00
|
|
|
joinLAN(t, s2, s1)
|
|
|
|
joinLAN(t, s3, s1)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
r.Check(wantPeers(s1, 3))
|
|
|
|
r.Check(wantPeers(s2, 3))
|
|
|
|
r.Check(wantPeers(s3, 3))
|
|
|
|
})
|
2015-01-21 00:30:56 +00:00
|
|
|
// Issue a leave to the leader
|
2017-10-10 22:19:50 +00:00
|
|
|
var leader *Server
|
2017-06-26 12:35:34 +00:00
|
|
|
switch {
|
|
|
|
case s1.IsLeader():
|
2017-10-10 22:19:50 +00:00
|
|
|
leader = s1
|
2017-06-26 12:35:34 +00:00
|
|
|
case s2.IsLeader():
|
2017-10-10 22:19:50 +00:00
|
|
|
leader = s2
|
2017-10-31 20:16:56 +00:00
|
|
|
case s3.IsLeader():
|
|
|
|
leader = s3
|
2017-06-26 12:35:34 +00:00
|
|
|
default:
|
|
|
|
t.Fatal("no leader")
|
|
|
|
}
|
2017-10-10 22:19:50 +00:00
|
|
|
if err := leader.Leave(); err != nil {
|
2017-06-26 12:35:34 +00:00
|
|
|
t.Fatal("leave failed: ", err)
|
2013-12-10 00:05:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Should lose a peer
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-10-31 20:16:56 +00:00
|
|
|
r.Check(wantPeers(s1, 2))
|
|
|
|
r.Check(wantPeers(s2, 2))
|
|
|
|
r.Check(wantPeers(s3, 2))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2015-01-21 00:30:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestServer_Leave(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2015-01-21 00:30:56 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
// Second server not in bootstrap mode
|
|
|
|
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s2, s1)
|
2015-01-21 00:30:56 +00:00
|
|
|
|
2017-06-26 12:35:34 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
2015-01-21 00:30:56 +00:00
|
|
|
|
|
|
|
// Issue a leave to the non-leader
|
2017-10-10 22:19:50 +00:00
|
|
|
var nonleader *Server
|
2017-06-26 12:35:34 +00:00
|
|
|
switch {
|
|
|
|
case s1.IsLeader():
|
2017-10-10 22:19:50 +00:00
|
|
|
nonleader = s2
|
2017-06-26 12:35:34 +00:00
|
|
|
case s2.IsLeader():
|
2017-10-10 22:19:50 +00:00
|
|
|
nonleader = s1
|
2017-06-26 12:35:34 +00:00
|
|
|
default:
|
|
|
|
t.Fatal("no leader")
|
|
|
|
}
|
2017-10-10 22:19:50 +00:00
|
|
|
if err := nonleader.Leave(); err != nil {
|
2017-06-26 12:35:34 +00:00
|
|
|
t.Fatal("leave failed: ", err)
|
2015-01-21 00:30:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Should lose a peer
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-05 07:23:28 +00:00
|
|
|
r.Check(wantPeers(s1, 1))
|
|
|
|
r.Check(wantPeers(s2, 1))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2013-12-10 00:05:15 +00:00
|
|
|
}
|
2013-12-19 23:18:25 +00:00
|
|
|
|
|
|
|
func TestServer_RPC(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2013-12-19 23:18:25 +00:00
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2014-04-07 21:36:32 +00:00
|
|
|
|
|
|
|
func TestServer_JoinLAN_TLS(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-06-26 12:23:09 +00:00
|
|
|
dir1, conf1 := testServerConfig(t)
|
2014-04-07 21:36:32 +00:00
|
|
|
conf1.VerifyIncoming = true
|
|
|
|
conf1.VerifyOutgoing = true
|
|
|
|
configureTLS(conf1)
|
2017-06-25 19:36:03 +00:00
|
|
|
s1, err := newServer(conf1)
|
2014-04-07 21:36:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
2018-11-20 11:27:26 +00:00
|
|
|
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
|
2014-04-07 21:36:32 +00:00
|
|
|
|
2017-06-26 12:23:09 +00:00
|
|
|
dir2, conf2 := testServerConfig(t)
|
2014-04-07 21:36:32 +00:00
|
|
|
conf2.Bootstrap = false
|
|
|
|
conf2.VerifyIncoming = true
|
|
|
|
conf2.VerifyOutgoing = true
|
|
|
|
configureTLS(conf2)
|
2017-06-25 19:36:03 +00:00
|
|
|
s2, err := newServer(conf2)
|
2014-04-07 21:36:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s2, s1)
|
2018-11-20 11:27:26 +00:00
|
|
|
testrpc.WaitForTestAgent(t, s2.RPC, "dc1")
|
2017-09-25 22:27:04 +00:00
|
|
|
|
2014-05-26 21:44:37 +00:00
|
|
|
// Verify Raft has established a peer
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-09-25 22:27:04 +00:00
|
|
|
r.Check(wantRaft([]*Server{s1, s2}))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-04-07 21:36:32 +00:00
|
|
|
}
|
2014-06-16 21:36:12 +00:00
|
|
|
|
|
|
|
func TestServer_Expect(t *testing.T) {
|
2016-09-01 04:22:32 +00:00
|
|
|
// All test servers should be in expect=3 mode, except for the 3rd one,
|
|
|
|
// but one with expect=0 can cause a bootstrap to occur from the other
|
|
|
|
// servers as currently implemented.
|
2014-06-16 21:36:12 +00:00
|
|
|
dir1, s1 := testServerDCExpect(t, "dc1", 3)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, s2 := testServerDCExpect(t, "dc1", 3)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
2014-06-18 23:15:28 +00:00
|
|
|
dir3, s3 := testServerDCExpect(t, "dc1", 0)
|
2014-06-16 21:36:12 +00:00
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
2016-09-01 04:22:32 +00:00
|
|
|
dir4, s4 := testServerDCExpect(t, "dc1", 3)
|
|
|
|
defer os.RemoveAll(dir4)
|
|
|
|
defer s4.Shutdown()
|
|
|
|
|
|
|
|
// Join the first two servers.
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s2, s1)
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2016-09-01 04:22:32 +00:00
|
|
|
// Should have no peers yet since the bootstrap didn't occur.
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-05 07:23:28 +00:00
|
|
|
r.Check(wantPeers(s1, 0))
|
|
|
|
r.Check(wantPeers(s2, 0))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2016-09-01 04:22:32 +00:00
|
|
|
// Join the third node.
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s3, s1)
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2016-09-01 04:22:32 +00:00
|
|
|
// Now we have three servers so we should bootstrap.
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-05 07:23:28 +00:00
|
|
|
r.Check(wantPeers(s1, 3))
|
|
|
|
r.Check(wantPeers(s2, 3))
|
|
|
|
r.Check(wantPeers(s3, 3))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2019-07-12 15:52:26 +00:00
|
|
|
// Join the fourth node.
|
|
|
|
joinLAN(t, s4, s1)
|
|
|
|
|
|
|
|
// Wait for the new server to see itself added to the cluster.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
r.Check(wantRaft([]*Server{s1, s2, s3, s4}))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should not trigger bootstrap and new election when s3 joins, since cluster exists
|
|
|
|
func TestServer_AvoidReBootstrap(t *testing.T) {
|
|
|
|
dir1, s1 := testServerDCExpect(t, "dc1", 2)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
dir2, s2 := testServerDCExpect(t, "dc1", 0)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
dir3, s3 := testServerDCExpect(t, "dc1", 2)
|
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
|
|
|
// Join the first two servers
|
|
|
|
joinLAN(t, s2, s1)
|
|
|
|
|
2016-09-01 04:22:32 +00:00
|
|
|
// Make sure a leader is elected, grab the current term and then add in
|
2019-07-12 15:52:26 +00:00
|
|
|
// the third server.
|
2017-04-19 23:00:11 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2016-09-01 04:22:32 +00:00
|
|
|
termBefore := s1.raft.Stats()["last_log_term"]
|
2019-07-12 15:52:26 +00:00
|
|
|
joinLAN(t, s3, s1)
|
2016-09-01 04:22:32 +00:00
|
|
|
|
|
|
|
// Wait for the new server to see itself added to the cluster.
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-07-12 15:52:26 +00:00
|
|
|
r.Check(wantRaft([]*Server{s1, s2, s3}))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-09-01 04:22:32 +00:00
|
|
|
|
|
|
|
// Make sure there's still a leader and that the term didn't change,
|
|
|
|
// so we know an election didn't occur.
|
2017-04-19 23:00:11 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
2016-09-01 04:22:32 +00:00
|
|
|
termAfter := s1.raft.Stats()["last_log_term"]
|
|
|
|
if termAfter != termBefore {
|
|
|
|
t.Fatalf("looks like an election took place")
|
|
|
|
}
|
2014-06-16 21:36:12 +00:00
|
|
|
}
|
|
|
|
|
2018-09-20 00:41:36 +00:00
|
|
|
func TestServer_Expect_NonVoters(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-07-17 17:35:33 +00:00
|
|
|
dir1, s1 := testServerDCExpectNonVoter(t, "dc1", 2)
|
2018-09-20 00:41:36 +00:00
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2019-07-17 17:35:33 +00:00
|
|
|
dir2, s2 := testServerDCExpect(t, "dc1", 2)
|
2018-09-20 00:41:36 +00:00
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
2019-07-17 17:35:33 +00:00
|
|
|
dir3, s3 := testServerDCExpect(t, "dc1", 2)
|
2018-09-20 00:41:36 +00:00
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
2019-07-17 17:35:33 +00:00
|
|
|
// Join the first two servers.
|
2018-09-20 00:41:36 +00:00
|
|
|
joinLAN(t, s2, s1)
|
|
|
|
|
|
|
|
// Should have no peers yet since the bootstrap didn't occur.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
r.Check(wantPeers(s1, 0))
|
|
|
|
r.Check(wantPeers(s2, 0))
|
|
|
|
})
|
|
|
|
|
2019-07-17 17:35:33 +00:00
|
|
|
// Join the third node.
|
|
|
|
joinLAN(t, s3, s1)
|
2018-09-20 00:41:36 +00:00
|
|
|
|
|
|
|
// Now we have three servers so we should bootstrap.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-07-17 17:35:33 +00:00
|
|
|
r.Check(wantPeers(s1, 2))
|
|
|
|
r.Check(wantPeers(s2, 2))
|
|
|
|
r.Check(wantPeers(s3, 2))
|
2018-09-20 00:41:36 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Make sure a leader is elected
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-07-17 17:35:33 +00:00
|
|
|
r.Check(wantRaft([]*Server{s1, s2, s3}))
|
2018-09-20 00:41:36 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-06-16 21:36:12 +00:00
|
|
|
func TestServer_BadExpect(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2014-06-16 21:36:12 +00:00
|
|
|
// this one is in expect=3 mode
|
|
|
|
dir1, s1 := testServerDCExpect(t, "dc1", 3)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
// this one is in expect=2 mode
|
|
|
|
dir2, s2 := testServerDCExpect(t, "dc1", 2)
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
// and this one is in expect=3 mode
|
|
|
|
dir3, s3 := testServerDCExpect(t, "dc1", 3)
|
|
|
|
defer os.RemoveAll(dir3)
|
|
|
|
defer s3.Shutdown()
|
|
|
|
|
|
|
|
// Try to join
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s2, s1)
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2017-05-05 07:23:28 +00:00
|
|
|
// should have no peers yet
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-05 07:23:28 +00:00
|
|
|
r.Check(wantPeers(s1, 0))
|
|
|
|
r.Check(wantPeers(s2, 0))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-06-16 21:36:12 +00:00
|
|
|
|
|
|
|
// join the third node
|
2017-05-05 10:29:49 +00:00
|
|
|
joinLAN(t, s3, s1)
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2017-05-05 07:23:28 +00:00
|
|
|
// should still have no peers (because s2 is in expect=2 mode)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-05-05 07:23:28 +00:00
|
|
|
r.Check(wantPeers(s1, 0))
|
|
|
|
r.Check(wantPeers(s2, 0))
|
|
|
|
r.Check(wantPeers(s3, 0))
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-10-04 20:43:10 +00:00
|
|
|
}
|
|
|
|
|
2014-10-09 17:25:53 +00:00
|
|
|
// fakeGlobalResp is a throwaway response collector handed to
// Server.globalRPC in tests that only care about the returned error,
// not the per-datacenter replies.
type fakeGlobalResp struct{}

// Add discards a reply; this stub never inspects results.
func (r *fakeGlobalResp) Add(interface{}) {
}

// New returns an empty value for globalRPC to decode each reply into.
func (r *fakeGlobalResp) New() interface{} {
	return struct{}{}
}
|
2014-06-16 21:36:12 +00:00
|
|
|
|
2014-10-09 17:25:53 +00:00
|
|
|
func TestServer_globalRPCErrors(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2014-10-09 17:25:53 +00:00
|
|
|
dir1, s1 := testServerDC(t, "dc1")
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2017-04-29 16:34:02 +00:00
|
|
|
if len(s1.router.GetDatacenters()) != 1 {
|
|
|
|
r.Fatal(nil)
|
|
|
|
}
|
|
|
|
})
|
2014-12-05 05:32:59 +00:00
|
|
|
|
2014-10-07 18:05:31 +00:00
|
|
|
// Check that an error from a remote DC is returned
|
2014-10-09 17:25:53 +00:00
|
|
|
err := s1.globalRPC("Bad.Method", nil, &fakeGlobalResp{})
|
2014-10-04 20:43:10 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("should have errored")
|
|
|
|
}
|
|
|
|
if !strings.Contains(err.Error(), "Bad.Method") {
|
2018-03-19 16:56:00 +00:00
|
|
|
t.Fatalf("unexpected error: %s", err)
|
2014-10-04 20:43:10 +00:00
|
|
|
}
|
2014-06-16 21:36:12 +00:00
|
|
|
}
|
2014-10-07 18:05:31 +00:00
|
|
|
|
2017-05-10 21:25:48 +00:00
|
|
|
func testVerifyRPC(s1, s2 *Server, t *testing.T) (bool, error) {
|
2017-09-25 22:27:04 +00:00
|
|
|
joinLAN(t, s1, s2)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
r.Check(wantRaft([]*Server{s1, s2}))
|
|
|
|
})
|
2017-05-10 21:25:48 +00:00
|
|
|
|
|
|
|
// Have s2 make an RPC call to s1
|
2017-07-06 10:48:37 +00:00
|
|
|
var leader *metadata.Server
|
2017-08-30 17:31:36 +00:00
|
|
|
for _, server := range s2.serverLookup.Servers() {
|
2017-05-10 21:25:48 +00:00
|
|
|
if server.Name == s1.config.NodeName {
|
|
|
|
leader = server
|
|
|
|
}
|
|
|
|
}
|
2017-05-24 19:26:42 +00:00
|
|
|
if leader == nil {
|
|
|
|
t.Fatal("no leader")
|
|
|
|
}
|
2020-03-09 20:59:02 +00:00
|
|
|
return s2.connPool.Ping(leader.Datacenter, leader.ShortName, leader.Addr, leader.Version, leader.UseTLS)
|
2017-05-10 21:25:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestServer_TLSToNoTLS(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-05-10 21:25:48 +00:00
|
|
|
// Set up a server with no TLS configured
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
// Add a second server with TLS configured
|
|
|
|
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Bootstrap = false
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
c.CAFile = "../../test/client_certs/rootca.crt"
|
|
|
|
c.CertFile = "../../test/client_certs/server.crt"
|
|
|
|
c.KeyFile = "../../test/client_certs/server.key"
|
2017-05-10 21:25:48 +00:00
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
success, err := testVerifyRPC(s1, s2, t)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if !success {
|
|
|
|
t.Fatalf("bad: %v", success)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServer_TLSForceOutgoingToNoTLS(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-05-10 21:25:48 +00:00
|
|
|
// Set up a server with no TLS configured
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
// Add a second server with TLS and VerifyOutgoing set
|
|
|
|
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Bootstrap = false
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
c.CAFile = "../../test/client_certs/rootca.crt"
|
|
|
|
c.CertFile = "../../test/client_certs/server.crt"
|
|
|
|
c.KeyFile = "../../test/client_certs/server.key"
|
2017-05-10 21:25:48 +00:00
|
|
|
c.VerifyOutgoing = true
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
_, err := testVerifyRPC(s1, s2, t)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "remote error: tls") {
|
|
|
|
t.Fatalf("should fail")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServer_TLSToFullVerify(t *testing.T) {
|
2017-06-27 13:22:18 +00:00
|
|
|
t.Parallel()
|
2017-05-10 21:25:48 +00:00
|
|
|
// Set up a server with TLS and VerifyIncoming set
|
|
|
|
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
c.CAFile = "../../test/client_certs/rootca.crt"
|
|
|
|
c.CertFile = "../../test/client_certs/server.crt"
|
|
|
|
c.KeyFile = "../../test/client_certs/server.key"
|
2017-05-10 21:25:48 +00:00
|
|
|
c.VerifyIncoming = true
|
|
|
|
c.VerifyOutgoing = true
|
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
// Add a second server with TLS configured
|
|
|
|
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Bootstrap = false
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
c.CAFile = "../../test/client_certs/rootca.crt"
|
|
|
|
c.CertFile = "../../test/client_certs/server.crt"
|
|
|
|
c.KeyFile = "../../test/client_certs/server.key"
|
2017-05-10 21:25:48 +00:00
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir2)
|
|
|
|
defer s2.Shutdown()
|
|
|
|
|
|
|
|
success, err := testVerifyRPC(s1, s2, t)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if !success {
|
|
|
|
t.Fatalf("bad: %v", success)
|
|
|
|
}
|
|
|
|
}
|
2018-02-21 18:48:53 +00:00
|
|
|
|
|
|
|
func TestServer_RevokeLeadershipIdempotent(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
2018-03-13 17:30:18 +00:00
|
|
|
|
2018-02-21 18:48:53 +00:00
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
2019-06-19 12:50:48 +00:00
|
|
|
s1.revokeLeadership()
|
|
|
|
s1.revokeLeadership()
|
2018-02-21 18:48:53 +00:00
|
|
|
}
|
2019-04-26 18:25:03 +00:00
|
|
|
|
|
|
|
func TestServer_Reload(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
global_entry_init := &structs.ProxyConfigEntry{
|
|
|
|
Kind: structs.ProxyDefaults,
|
|
|
|
Name: structs.ProxyConfigGlobal,
|
|
|
|
Config: map[string]interface{}{
|
|
|
|
// these are made a []uint8 and a int64 to allow the Equals test to pass
|
|
|
|
// otherwise it will fail complaining about data types
|
2019-04-29 22:08:09 +00:00
|
|
|
"foo": "bar",
|
2019-04-26 18:25:03 +00:00
|
|
|
"bar": int64(1),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
dir1, s := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.Build = "1.5.0"
|
2019-06-13 09:26:27 +00:00
|
|
|
c.RPCRate = 500
|
|
|
|
c.RPCMaxBurst = 5000
|
2019-04-26 18:25:03 +00:00
|
|
|
})
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s.Shutdown()
|
|
|
|
|
|
|
|
testrpc.WaitForTestAgent(t, s.RPC, "dc1")
|
|
|
|
|
|
|
|
s.config.ConfigEntryBootstrap = []structs.ConfigEntry{
|
|
|
|
global_entry_init,
|
|
|
|
}
|
|
|
|
|
2019-06-13 09:26:27 +00:00
|
|
|
limiter := s.rpcLimiter.Load().(*rate.Limiter)
|
|
|
|
require.Equal(t, rate.Limit(500), limiter.Limit())
|
|
|
|
require.Equal(t, 5000, limiter.Burst())
|
|
|
|
|
|
|
|
// Change rate limit
|
|
|
|
s.config.RPCRate = 1000
|
|
|
|
s.config.RPCMaxBurst = 10000
|
|
|
|
|
2019-04-26 18:25:03 +00:00
|
|
|
s.ReloadConfig(s.config)
|
|
|
|
|
2020-01-24 15:04:58 +00:00
|
|
|
_, entry, err := s.fsm.State().ConfigEntry(nil, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMeta())
|
2019-04-26 18:25:03 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, entry)
|
|
|
|
global, ok := entry.(*structs.ProxyConfigEntry)
|
|
|
|
require.True(t, ok)
|
|
|
|
require.Equal(t, global_entry_init.Kind, global.Kind)
|
|
|
|
require.Equal(t, global_entry_init.Name, global.Name)
|
|
|
|
require.Equal(t, global_entry_init.Config, global.Config)
|
2019-06-13 09:26:27 +00:00
|
|
|
|
|
|
|
// Check rate limiter got updated
|
|
|
|
limiter = s.rpcLimiter.Load().(*rate.Limiter)
|
|
|
|
require.Equal(t, rate.Limit(1000), limiter.Limit())
|
|
|
|
require.Equal(t, 10000, limiter.Burst())
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServer_RPC_RateLimit(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
dir1, conf1 := testServerConfig(t)
|
|
|
|
conf1.RPCRate = 2
|
|
|
|
conf1.RPCMaxBurst = 2
|
|
|
|
s1, err := NewServer(conf1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out struct{}
|
|
|
|
if err := s1.RPC("Status.Ping", struct{}{}, &out); err != structs.ErrRPCRateExceeded {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2019-04-26 18:25:03 +00:00
|
|
|
}
|
2019-11-11 20:30:01 +00:00
|
|
|
|
|
|
|
func TestServer_CALogging(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
dir1, conf1 := testServerConfig(t)
|
|
|
|
|
|
|
|
// Setup dummy logger to catch output
|
|
|
|
var buf bytes.Buffer
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.LoggerWithOutput(t, &buf)
|
2019-11-11 20:30:01 +00:00
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
c, err := tlsutil.NewConfigurator(conf1.ToTLSUtilConfig(), logger)
|
2019-11-11 20:30:01 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
s1, err := NewServerLogger(conf1, logger, new(token.Store), c)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
if _, ok := s1.caProvider.(ca.NeedsLogger); !ok {
|
|
|
|
t.Fatalf("provider does not implement NeedsLogger")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait til CA root is setup
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out structs.IndexedCARoots
|
|
|
|
r.Check(s1.RPC("ConnectCA.Roots", structs.DCSpecificRequest{
|
|
|
|
Datacenter: conf1.Datacenter,
|
|
|
|
}, &out))
|
|
|
|
})
|
|
|
|
|
|
|
|
require.Contains(t, buf.String(), "consul CA provider configured")
|
|
|
|
}
|