acbfeb5815
This change updates tests to honor `BootstrapExpect` exclusively when forming test clusters, and removes test-only knobs such as `config.DevDisableBootstrap`.

Background: Test cluster creation is fragile. Test servers don't follow the `BootstrapExpect` route the way production clusters do. Instead, they start as single-node clusters and then rejoin, which risks split brain and other test flakiness. The test framework exposes a few knobs (e.g. `config.DevDisableBootstrap` and `config.Bootstrap`) that control whether a server should bootstrap the cluster. These flags are confusing, and it's unclear when to use them: their usage in multi-node clusters isn't properly documented. They also have bad side effects because they don't control the Raft library: if `config.DevDisableBootstrap` is true, the test server may not immediately attempt to bootstrap a cluster, but after an election timeout (~50ms) Raft may force a leadership election anyway, win it with only one vote, and cause a split brain. The knobs are further confusing because "bootstrap" is an overloaded term: in `BootstrapExpect` it means bootstrapping the cluster only after N servers are connected, whereas in the tests and the knobs above it means the server is a single-node cluster that shouldn't wait for any other server.

Changes: This commit makes two changes. First, it relies on `BootstrapExpect` instead of the `Bootstrap` and/or `DevMode` flags; this change is relatively trivial. Second, it introduces a `Bootstrapped` flag to track whether the cluster has bootstrapped, which allows `BootstrapExpect` to remain immutable. Previously, the flag was a config value that was set to 0 once cluster bootstrap completed.
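To make the second change concrete, here is a minimal sketch of the bootstrapped-flag pattern in Go. Everything in it (the `server` type, `bootstrapExpect`, `bootstrapped`, `maybeBootstrap`) is an illustrative assumption for this sketch, not Nomad's actual implementation:

// Sketch: track "has the cluster bootstrapped?" in its own flag so that
// BootstrapExpect can stay immutable. All names here are illustrative
// assumptions, not Nomad's actual fields.
package main

import (
    "fmt"
    "sync/atomic"
)

type server struct {
    // bootstrapExpect is the number of servers that must join before the
    // cluster bootstraps; it is never mutated after construction.
    bootstrapExpect int32

    // bootstrapped records that bootstrap already ran, replacing the old
    // pattern of zeroing out the BootstrapExpect config value.
    bootstrapped int32
}

func (s *server) maybeBootstrap(knownPeers int32) {
    if atomic.LoadInt32(&s.bootstrapped) == 1 {
        return // already bootstrapped; never attempt again
    }
    if knownPeers >= s.bootstrapExpect {
        // ... perform the one-time Raft bootstrap here ...
        atomic.StoreInt32(&s.bootstrapped, 1)
    }
}

func main() {
    s := &server{bootstrapExpect: 3}
    s.maybeBootstrap(2)
    fmt.Println(atomic.LoadInt32(&s.bootstrapped)) // 0: still waiting for peers
    s.maybeBootstrap(3)
    fmt.Println(atomic.LoadInt32(&s.bootstrapped)) // 1: bootstrapped exactly once
}

With the expectation immutable, tests can assert on `BootstrapExpect` at any point in the cluster's lifetime, and the one-way `bootstrapped` transition avoids the ambiguity of the old mutable config value.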
package nomad

import (
    "net"
    "strings"
    "testing"

    "github.com/hashicorp/nomad/client"
    "github.com/hashicorp/nomad/client/config"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/testutil"
    "github.com/stretchr/testify/require"
)

type namedConnWrapper struct {
    net.Conn
    name string
}

type namedAddr string

func (n namedAddr) String() string  { return string(n) }
func (n namedAddr) Network() string { return string(n) }

func (n namedConnWrapper) LocalAddr() net.Addr {
    return namedAddr(n.name)
}

func TestServer_removeNodeConn_differentAddrs(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, nil)
    defer cleanupS1()
    testutil.WaitForLeader(t, s1.RPC)

    p1, p2 := net.Pipe()
    w1 := namedConnWrapper{
        Conn: p1,
        name: "a",
    }
    w2 := namedConnWrapper{
        Conn: p2,
        name: "b",
    }

    // Add the connections
    nodeID := uuid.Generate()
    ctx1 := &RPCContext{
        Conn:   w1,
        NodeID: nodeID,
    }
    ctx2 := &RPCContext{
        Conn:   w2,
        NodeID: nodeID,
    }

    s1.addNodeConn(ctx1)
    s1.addNodeConn(ctx2)
    require.Len(s1.connectedNodes(), 1)
    require.Len(s1.nodeConns[nodeID], 2)

    // Check that the value is the second conn.
    state, ok := s1.getNodeConn(nodeID)
    require.True(ok)
    require.Equal(state.Ctx.Conn.LocalAddr().String(), w2.name)

    // Delete the first
    s1.removeNodeConn(ctx1)
    require.Len(s1.connectedNodes(), 1)
    require.Len(s1.nodeConns[nodeID], 1)

    // Check that the value is the second conn.
    state, ok = s1.getNodeConn(nodeID)
    require.True(ok)
    require.Equal(state.Ctx.Conn.LocalAddr().String(), w2.name)

    // Delete the second
    s1.removeNodeConn(ctx2)
    require.Len(s1.connectedNodes(), 0)

    _, ok = s1.getNodeConn(nodeID)
    require.False(ok)
}

func TestServerWithNodeConn_NoPath(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 2
    })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 2
    })
    defer cleanupS2()
    TestJoin(t, s1, s2)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)

    nodeID := uuid.Generate()
    srv, err := s1.serverWithNodeConn(nodeID, s1.Region())
    require.Nil(srv)
    require.EqualError(err, structs.ErrNoNodeConn.Error())
}

func TestServerWithNodeConn_NoPath_Region(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, nil)
    defer cleanupS1()
    testutil.WaitForLeader(t, s1.RPC)

    nodeID := uuid.Generate()
    srv, err := s1.serverWithNodeConn(nodeID, "fake-region")
    require.Nil(srv)
    require.EqualError(err, structs.ErrNoRegionPath.Error())
}

func TestServerWithNodeConn_Path(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 2
    })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 2
    })
    defer cleanupS2()
    TestJoin(t, s1, s2)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)

    // Create a fake connection for the node on server 2
    nodeID := uuid.Generate()
    s2.addNodeConn(&RPCContext{
        NodeID: nodeID,
    })

    srv, err := s1.serverWithNodeConn(nodeID, s1.Region())
    require.NotNil(srv)
    require.Equal(srv.Addr.String(), s2.config.RPCAddr.String())
    require.Nil(err)
}

func TestServerWithNodeConn_Path_Region(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, nil)
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.Region = "two"
    })
    defer cleanupS2()
    TestJoin(t, s1, s2)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)

    // Create a fake connection for the node on server 2
    nodeID := uuid.Generate()
    s2.addNodeConn(&RPCContext{
        NodeID: nodeID,
    })

    srv, err := s1.serverWithNodeConn(nodeID, s2.Region())
    require.NotNil(srv)
    require.Equal(srv.Addr.String(), s2.config.RPCAddr.String())
    require.Nil(err)
}

func TestServerWithNodeConn_Path_Newest(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS2()
    s3, cleanupS3 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS3()
    TestJoin(t, s1, s2, s3)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)
    testutil.WaitForLeader(t, s3.RPC)

    // Create a fake connection for the node on server 2 and 3
    nodeID := uuid.Generate()
    s2.addNodeConn(&RPCContext{
        NodeID: nodeID,
    })
    s3.addNodeConn(&RPCContext{
        NodeID: nodeID,
    })

    srv, err := s1.serverWithNodeConn(nodeID, s1.Region())
    require.NotNil(srv)
    require.Equal(srv.Addr.String(), s3.config.RPCAddr.String())
    require.Nil(err)
}

func TestServerWithNodeConn_PathAndErr(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS2()
    s3, cleanupS3 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS3()
    TestJoin(t, s1, s2, s3)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)
    testutil.WaitForLeader(t, s3.RPC)

    // Create a fake connection for the node on server 2
    nodeID := uuid.Generate()
    s2.addNodeConn(&RPCContext{
        NodeID: nodeID,
    })

    // Shutdown the RPC layer for server 3
    s3.rpcListener.Close()

    srv, err := s1.serverWithNodeConn(nodeID, s1.Region())
    require.NotNil(srv)
    require.Equal(srv.Addr.String(), s2.config.RPCAddr.String())
    require.Nil(err)
}

func TestServerWithNodeConn_NoPathAndErr(t *testing.T) {
    t.Parallel()
    require := require.New(t)

    s1, cleanupS1 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS2()
    s3, cleanupS3 := TestServer(t, func(c *Config) {
        c.BootstrapExpect = 3
    })
    defer cleanupS3()
    TestJoin(t, s1, s2, s3)
    testutil.WaitForLeader(t, s1.RPC)
    testutil.WaitForLeader(t, s2.RPC)
    testutil.WaitForLeader(t, s3.RPC)

    // Shutdown the RPC layer for server 3
    s3.rpcListener.Close()

    srv, err := s1.serverWithNodeConn(uuid.Generate(), s1.Region())
    require.Nil(srv)
    require.NotNil(err)

    // the exact error seems to be dependent on timing and raft protocol version
    if !strings.Contains(err.Error(), "failed querying") && !strings.Contains(err.Error(), "No path to node") {
        require.Contains(err.Error(), "failed querying")
    }
}

func TestNodeStreamingRpc_badEndpoint(t *testing.T) {
    t.Parallel()
    require := require.New(t)
    s1, cleanupS1 := TestServer(t, nil)
    defer cleanupS1()
    testutil.WaitForLeader(t, s1.RPC)

    c, cleanupC := client.TestClient(t, func(c *config.Config) {
        c.Servers = []string{s1.config.RPCAddr.String()}
    })
    defer cleanupC()

    // Wait for the client to connect
    testutil.WaitForResult(func() (bool, error) {
        nodes := s1.connectedNodes()
        return len(nodes) == 1, nil
    }, func(err error) {
        t.Fatalf("should have a client")
    })

    state, ok := s1.getNodeConn(c.NodeID())
    require.True(ok)

    conn, err := NodeStreamingRpc(state.Session, "Bogus")
    require.Nil(conn)
    require.NotNil(err)
    require.Contains(err.Error(), "Bogus")
    require.True(structs.IsErrUnknownMethod(err))
}