test: convert remaining WaitForResult tests
parent 7007c89870
commit 17f6ecdd12
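This converts the remaining `testutil.WaitForResult`/`testrpc.WaitForResult` call sites to the `retry.Run` helper from `github.com/hashicorp/consul/testutil/retry`: instead of a polling callback that returns `(bool, error)`, the closure receives a `*retry.R` and reports failures with `r.Fatal`/`r.Fatalf`, and the runner re-invokes it until it stops failing or the retryer gives up. A minimal, illustrative sketch of the pattern follows; the test name, the `ready` channel, and the surrounding body are made up for the example, and only `retry.Run` and `*retry.R` come from the testutil/retry package shown in the imports below.

package example

import (
	"testing"

	"github.com/hashicorp/consul/testutil/retry"
)

// TestEventuallyReady shows the shape used throughout this commit: the closure
// is retried until it returns without calling r.Fatal/r.Fatalf, or the test fails.
func TestEventuallyReady(t *testing.T) {
	ready := make(chan struct{})
	go close(ready) // stand-in for a server joining or a leader being elected

	retry.Run(t, func(r *retry.R) {
		select {
		case <-ready:
			// condition met, this attempt succeeds
		default:
			r.Fatal("not ready yet") // fails this attempt and triggers a retry
		}
	})
}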
@@ -9,7 +9,7 @@ import (
 	consulapi "github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/command/agent"
 	"github.com/hashicorp/consul/command/base"
-	"github.com/hashicorp/consul/testutil"
+	"github.com/hashicorp/consul/testutil/retry"
 	"github.com/mitchellh/cli"
 )
 
@@ -91,12 +91,15 @@ func waitForLeader(t *testing.T, httpAddr string) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := testutil.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, qm, err := client.Catalog().Nodes(nil)
-		return err == nil && qm.KnownLeader && qm.LastIndex > 0, err
-	}); err != nil {
-		t.Fatal(err)
-	}
+		if err != nil {
+			r.Fatal(err)
+		}
+		if !qm.KnownLeader || qm.LastIndex == 0 {
+			r.Fatal("not leader")
+		}
+	})
 }
 
 func httpClient(addr string) (*consulapi.Client, error) {
@@ -203,15 +206,15 @@ func TestExecCommand_Sessions_Foreign(t *testing.T) {
 	c.conf.localNode = "foo"
 
 	var id string
-	if err := testutil.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		id, err = c.createSession()
-		if err != nil && strings.Contains(err.Error(), "Failed to find Consul server") {
-			err = nil
+		if err != nil {
+			r.Fatal(err)
 		}
-		return id != "", err
-	}); err != nil {
-		t.Fatal(err)
-	}
+		if id == "" {
+			r.Fatal("no id")
+		}
+	})
 
 	se, _, err := client.Session().Info(id, nil)
 	if err != nil {
@@ -11,6 +11,7 @@ import (
 
 	"github.com/hashicorp/consul/consul/structs"
 	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/testutil/retry"
 	"github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/serf/serf"
 )
@@ -79,32 +80,21 @@ func TestClient_JoinLAN(t *testing.T) {
 	defer c1.Shutdown()
 
 	// Try to join
-	addr := fmt.Sprintf("127.0.0.1:%d",
-		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
+	addr := fmt.Sprintf("127.0.0.1:%d", s1.config.SerfLANConfig.MemberlistConfig.BindPort)
 	if _, err := c1.JoinLAN([]string{addr}); err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return c1.servers.NumServers() == 1, nil
-	}); err != nil {
-		t.Fatal("expected consul server")
-	}
-
-	// Check the members
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		server_check := len(s1.LANMembers()) == 2
-		client_check := len(c1.LANMembers()) == 2
-		return server_check && client_check, nil
-	}); err != nil {
-		t.Fatal("bad len")
-	}
-
-	// Check we have a new consul
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return c1.servers.NumServers() == 1, nil
-	}); err != nil {
-		t.Fatal("expected consul server")
-	}
+	retry.Run(t, func(r *retry.R) {
+		if got, want := c1.servers.NumServers(), 1; got != want {
+			r.Fatalf("got %d servers want %d", got, want)
+		}
+		if got, want := len(s1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d server LAN members want %d", got, want)
+		}
+		if got, want := len(c1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d client LAN members want %d", got, want)
+		}
+	})
 }
 
 func TestClient_JoinLAN_Invalid(t *testing.T) {
@@ -117,8 +107,7 @@ func TestClient_JoinLAN_Invalid(t *testing.T) {
 	defer c1.Shutdown()
 
 	// Try to join
-	addr := fmt.Sprintf("127.0.0.1:%d",
-		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
+	addr := fmt.Sprintf("127.0.0.1:%d", s1.config.SerfLANConfig.MemberlistConfig.BindPort)
 	if _, err := c1.JoinLAN([]string{addr}); err == nil {
 		t.Fatalf("should error")
 	}
@@ -189,12 +178,11 @@ func TestClient_RPC(t *testing.T) {
 	}
 
 	// RPC should succeed
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		err := c1.RPC("Status.Ping", struct{}{}, &out)
-		return err == nil, err
-	}); err != nil {
-		t.Fatal(err)
-	}
+	retry.Run(t, func(r *retry.R) {
+		if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
+			r.Fatal("ping failed", err)
+		}
+	})
 }
 
 func TestClient_RPC_Pool(t *testing.T) {
@@ -214,12 +202,14 @@ func TestClient_RPC_Pool(t *testing.T) {
 	}
 
 	// Wait for both agents to finish joining
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return len(s1.LANMembers()) == 2 && len(c1.LANMembers()) == 2, nil
-	}); err != nil {
-		t.Fatalf("Server has %v of %v expected members; Client has %v of %v expected members.",
-			len(s1.LANMembers()), 2, len(c1.LANMembers()), 2)
-	}
+	retry.Run(t, func(r *retry.R) {
+		if got, want := len(s1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d server LAN members want %d", got, want)
+		}
+		if got, want := len(c1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d client LAN members want %d", got, want)
+		}
+	})
 
 	// Blast out a bunch of RPC requests at the same time to try to get
 	// contention opening new connections.
@@ -230,12 +220,11 @@ func TestClient_RPC_Pool(t *testing.T) {
 		go func() {
 			defer wg.Done()
 			var out struct{}
-			if err := testrpc.WaitForResult(func() (bool, error) {
-				err := c1.RPC("Status.Ping", struct{}{}, &out)
-				return err == nil, err
-			}); err != nil {
-				t.Fatal(err)
-			}
+			retry.Run(t, func(r *retry.R) {
+				if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
+					r.Fatal("ping failed", err)
+				}
+			})
 		}()
 	}
 
@@ -345,20 +334,17 @@ func TestClient_RPC_TLS(t *testing.T) {
 	}
 
 	// Wait for joins to finish/RPC to succeed
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		if len(s1.LANMembers()) != 2 {
-			return false, fmt.Errorf("bad len: %v", len(s1.LANMembers()))
+	retry.Run(t, func(r *retry.R) {
+		if got, want := len(s1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d server LAN members want %d", got, want)
 		}
-
-		if len(c1.LANMembers()) != 2 {
-			return false, fmt.Errorf("bad len: %v", len(c1.LANMembers()))
+		if got, want := len(c1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d client LAN members want %d", got, want)
 		}
-
-		err := c1.RPC("Status.Ping", struct{}{}, &out)
-		return err == nil, err
-	}); err != nil {
-		t.Fatal(err)
-	}
+		if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
+			r.Fatal("ping failed", err)
+		}
+	})
 }
 
 func TestClient_SnapshotRPC(t *testing.T) {
@@ -384,11 +370,11 @@ func TestClient_SnapshotRPC(t *testing.T) {
 	}
 
 	// Wait until we've got a healthy server.
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return c1.servers.NumServers() == 1, nil
-	}); err != nil {
-		t.Fatal("expected consul server")
-	}
+	retry.Run(t, func(r *retry.R) {
+		if got, want := c1.servers.NumServers(), 1; got != want {
+			r.Fatalf("got %d servers want %d", got, want)
+		}
+	})
 
 	// Take a snapshot.
 	var snap bytes.Buffer
@@ -443,11 +429,11 @@ func TestClient_SnapshotRPC_TLS(t *testing.T) {
 	}
 
 	// Wait until we've got a healthy server.
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return c1.servers.NumServers() == 1, nil
-	}); err != nil {
-		t.Fatal("expected consul server")
-	}
+	retry.Run(t, func(r *retry.R) {
+		if got, want := c1.servers.NumServers(), 1; got != want {
+			r.Fatalf("got %d servers want %d", got, want)
+		}
+	})
 
 	// Take a snapshot.
 	var snap bytes.Buffer
@@ -496,11 +482,14 @@ func TestClientServer_UserEvent(t *testing.T) {
 	testrpc.WaitForLeader(t, s1.RPC, "dc1")
 
 	// Check the members
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return len(c1.LANMembers()) == 2 && len(s1.LANMembers()) == 2, nil
-	}); err != nil {
-		t.Fatal("bad len")
-	}
+	retry.Run(t, func(r *retry.R) {
+		if got, want := len(s1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d server LAN members want %d", got, want)
+		}
+		if got, want := len(c1.LANMembers()), 2; got != want {
+			r.Fatalf("got %d client LAN members want %d", got, want)
+		}
+	})
 
 	// Fire the user event
 	codec := rpcClient(t, s1)
@@ -1,7 +1,6 @@
 package consul
 
 import (
-	"errors"
 	"fmt"
 	"os"
 	"testing"
@@ -10,6 +9,7 @@ import (
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/consul/structs"
 	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/testutil/retry"
 	"github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/serf/serf"
 )
@@ -39,15 +39,15 @@ func TestLeader_RegisterMember(t *testing.T) {
 
 	// Client should be registered
 	state := s1.fsm.State()
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node != nil, nil
-	}); err != nil {
-		t.Fatal("client not registered")
-	}
+		if node == nil {
+			r.Fatal("client not registered")
+		}
+	})
 
 	// Should have a check
 	_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
@@ -114,15 +114,15 @@ func TestLeader_FailedMember(t *testing.T) {
 
 	// Should be registered
 	state := s1.fsm.State()
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node != nil, nil
-	}); err != nil {
-		t.Fatal("client not registered")
-	}
+		if node == nil {
+			r.Fatal("client not registered")
+		}
+	})
 
 	// Should have a check
 	_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
@@ -139,15 +139,15 @@ func TestLeader_FailedMember(t *testing.T) {
 		t.Fatalf("bad check: %v", checks[0])
 	}
 
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, checks, err = state.NodeChecks(nil, c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return checks[0].Status == api.HealthCritical, errors.New(checks[0].Status)
-	}); err != nil {
-		t.Fatalf("check status is %v, should be critical", err)
-	}
+		if got, want := checks[0].Status, api.HealthCritical; got != want {
+			r.Fatalf("got status %q want %q", got, want)
+		}
+	})
 }
 
 func TestLeader_LeftMember(t *testing.T) {
@@ -174,32 +174,31 @@ func TestLeader_LeftMember(t *testing.T) {
 	state := s1.fsm.State()
 
 	// Should be registered
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node != nil, nil
-	}); err != nil {
-		t.Fatal("client should be registered")
-	}
+		if node == nil {
+			r.Fatal("client not registered")
+		}
+	})
 
 	// Node should leave
 	c1.Leave()
 	c1.Shutdown()
 
 	// Should be deregistered
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node == nil, nil
-	}); err != nil {
-		t.Fatal("client should not be registered")
-	}
+		if node != nil {
+			r.Fatal("client still registered")
+		}
+	})
 }
 
 func TestLeader_ReapMember(t *testing.T) {
 	dir1, s1 := testServerWithConfig(t, func(c *Config) {
 		c.ACLDatacenter = "dc1"
@@ -224,15 +223,15 @@ func TestLeader_ReapMember(t *testing.T) {
 	state := s1.fsm.State()
 
 	// Should be registered
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node != nil, nil
-	}); err != nil {
-		t.Fatal("client should be registered")
-	}
+		if node == nil {
+			r.Fatal("client not registered")
+		}
+	})
 
 	// Simulate a node reaping
 	mems := s1.LANMembers()
@@ -344,15 +343,15 @@ func TestLeader_Reconcile(t *testing.T) {
 	}
 
 	// Should be registered
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		_, node, err = state.GetNode(c1.config.NodeName)
+	retry.Run(t, func(r *retry.R) {
+		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node != nil, nil
-	}); err != nil {
-		t.Fatal("client should be registered")
-	}
+		if node == nil {
+			r.Fatal("client not registered")
+		}
+	})
 }
 
 func TestLeader_Reconcile_Races(t *testing.T) {
@@ -375,19 +374,16 @@ func TestLeader_Reconcile_Races(t *testing.T) {
 	// Wait for the server to reconcile the client and register it.
 	state := s1.fsm.State()
 	var nodeAddr string
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		if node != nil {
-			nodeAddr = node.Address
-			return true, nil
+		if node == nil {
+			r.Fatal("client not registered")
 		}
-		return false, nil
-	}); err != nil {
-		t.Fatalf("client should be registered: %v", err)
-	}
+		nodeAddr = node.Address
+	})
 
 	// Add in some metadata via the catalog (as if the agent synced it
 	// there). We also set the serfHealth check to failing so the reconile
@@ -428,15 +424,15 @@ func TestLeader_Reconcile_Races(t *testing.T) {
 
 	// Fail the member and wait for the health to go critical.
 	c1.Shutdown()
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return checks[0].Status == api.HealthCritical, errors.New(checks[0].Status)
-	}); err != nil {
-		t.Fatalf("check status should be critical: %v", err)
-	}
+		if got, want := checks[0].Status, api.HealthCritical; got != want {
+			r.Fatalf("got state %q want %q", got, want)
+		}
+	})
 
 	// Make sure the metadata didn't get clobbered.
 	_, node, err = state.GetNode(c1.config.NodeName)
@@ -476,32 +472,28 @@ func TestLeader_LeftServer(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		// Kill any server
 		servers[0].Shutdown()
 
 		// Force remove the non-leader (transition to left state)
 		if err := servers[1].RemoveFailedNode(servers[0].config.NodeName); err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
 
 		for _, s := range servers[1:] {
-			peers, _ := s.numPeers()
-			return peers == 2, fmt.Errorf("%d", peers)
+			if got, want := numPeers(s), 2; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
 		}
-
-		return true, nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+	})
 }
 
 func TestLeader_LeftLeader(t *testing.T) {
@@ -529,12 +521,11 @@ func TestLeader_LeftLeader(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	// Kill the leader!
@@ -558,25 +549,24 @@ func TestLeader_LeftLeader(t *testing.T) {
 			continue
 		}
 		remain = s
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 2, fmt.Errorf("%d", peers)
-		}); err != nil {
-			t.Fatal("should have 2 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 2; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	// Verify the old leader is deregistered
 	state := remain.fsm.State()
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		_, node, err := state.GetNode(leader.config.NodeName)
 		if err != nil {
-			t.Fatalf("err: %v", err)
+			r.Fatalf("err: %v", err)
 		}
-		return node == nil, nil
-	}); err != nil {
-		t.Fatal("should be deregistered")
-	}
+		if node != nil {
+			r.Fatal("leader should be deregistered")
+		}
+	})
 }
 
 func TestLeader_MultiBootstrap(t *testing.T) {
@@ -598,12 +588,11 @@ func TestLeader_MultiBootstrap(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers := s.serfLAN.Members()
-			return len(peers) == 2, nil
-		}); err != nil {
-			t.Fatal("should have 2 peerss")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := len(s.serfLAN.Members()), 2; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	// Ensure we don't have multiple raft peers
@@ -640,12 +629,11 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	var leader *Server
@@ -670,24 +658,21 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
 
 	// Wait for a new leader
 	leader = nil
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		for _, s := range servers {
 			if s.IsLeader() {
 				leader = s
-				return true, nil
+				return
 			}
 		}
-		return false, nil
-	}); err != nil {
-		t.Fatal("should have leader")
-	}
+		r.Fatal("no leader")
+	})
 
 	// Check that the new leader has a pending GC expiration
-	if err := testrpc.WaitForResult(func() (bool, error) {
-		return leader.tombstoneGC.PendingExpiration(), nil
-	}); err != nil {
-		t.Fatal("should have pending expiration")
-	}
+	retry.Run(t, func(r *retry.R) {
+		if !leader.tombstoneGC.PendingExpiration() {
+			r.Fatal("leader has no pending GC expiration")
+		}
+	})
 }
 
 func TestLeader_ReapTombstones(t *testing.T) {
@@ -746,17 +731,17 @@ func TestLeader_ReapTombstones(t *testing.T) {
 
 	// Check that the new leader has a pending GC expiration by
 	// watching for the tombstone to get removed.
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		snap := state.Snapshot()
 		defer snap.Close()
 		stones, err := snap.Tombstones()
 		if err != nil {
-			return false, err
+			r.Fatal(err)
 		}
-		return stones.Next() == nil, nil
-	}); err != nil {
-		t.Fatal(err)
-	}
+		if stones.Next() != nil {
+			r.Fatal("should have no tombstones")
+		}
+	})
 }
 
 func TestLeader_RollRaftServer(t *testing.T) {
@@ -792,24 +777,26 @@ func TestLeader_RollRaftServer(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	// Kill the v1 server
 	s2.Shutdown()
 
 	for _, s := range []*Server{s1, s3} {
-		if err := testrpc.WaitForResult(func() (bool, error) {
+		retry.Run(t, func(r *retry.R) {
 			minVer, err := ServerMinRaftProtocol(s.LANMembers())
-			return minVer == 2, err
-		}); err != nil {
-			t.Fatalf("minimum protocol version among servers should be 2")
-		}
+			if err != nil {
+				r.Fatal(err)
+			}
+			if got, want := minVer, 2; got != want {
+				r.Fatalf("got min raft version %d want %d", got, want)
+			}
+		})
 	}
 
 	// Replace the dead server with one running raft protocol v3
@@ -827,12 +814,12 @@ func TestLeader_RollRaftServer(t *testing.T) {
 
 	// Make sure the dead server is removed and we're back to 3 total peers
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
+		retry.Run(t, func(r *retry.R) {
 			addrs := 0
 			ids := 0
 			future := s.raft.GetConfiguration()
 			if err := future.Error(); err != nil {
-				return false, err
+				r.Fatal(err)
 			}
 			for _, server := range future.Configuration().Servers {
 				if string(server.ID) == string(server.Address) {
@@ -841,10 +828,13 @@ func TestLeader_RollRaftServer(t *testing.T) {
 					ids++
 				}
 			}
-			return addrs == 2 && ids == 1, nil
-		}); err != nil {
-			t.Fatalf("should see 2 legacy IDs and 1 GUID")
-		}
+			if got, want := addrs, 2; got != want {
+				r.Fatalf("got %d server addresses want %d", got, want)
+			}
+			if got, want := ids, 1; got != want {
+				r.Fatalf("got %d server ids want %d", got, want)
+			}
+		})
 	}
 }
 
@@ -880,28 +870,27 @@ func TestLeader_ChangeServerID(t *testing.T) {
 	}
 
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 
 	// Shut down a server, freeing up its address/port
 	s3.Shutdown()
 
-	if err := testrpc.WaitForResult(func() (bool, error) {
+	retry.Run(t, func(r *retry.R) {
 		alive := 0
 		for _, m := range s1.LANMembers() {
 			if m.Status == serf.StatusAlive {
 				alive++
 			}
 		}
-		return alive == 2, nil
-	}); err != nil {
-		t.Fatal("should have 2 alive members")
-	}
+		if got, want := alive, 2; got != want {
+			r.Fatalf("got %d alive members want %d", got, want)
+		}
+	})
 
 	// Bring up a new server with s3's address that will get a different ID
 	dir4, s4 := testServerWithConfig(t, func(c *Config) {
@@ -922,11 +911,10 @@ func TestLeader_ChangeServerID(t *testing.T) {
 
 	// Make sure the dead server is removed and we're back to 3 total peers
 	for _, s := range servers {
-		if err := testrpc.WaitForResult(func() (bool, error) {
-			peers, _ := s.numPeers()
-			return peers == 3, nil
-		}); err != nil {
-			t.Fatal("should have 3 peers")
-		}
+		retry.Run(t, func(r *retry.R) {
+			if got, want := numPeers(s), 3; got != want {
+				r.Fatalf("got %d peers want %d", got, want)
+			}
+		})
 	}
 }