Merge pull request #136 from hashicorp/f-testutil-package

WIP: Proof of concept using `WaitForResult` in tests
Author: Armon Dadgar
Date:   2014-05-16 15:03:50 -07:00
Commit: 6c0479f082
22 changed files with 369 additions and 365 deletions
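
The change applies one pattern throughout the test suite: a fixed `time.Sleep` followed by a single assertion is replaced with polling via `testutil.WaitForResult` (or `testutil.WaitForLeader` for leader election). A minimal sketch of the before/after, using a hypothetical `memberLister` stand-in for the agent (the real tests below call `srv.agent.LANMembers()` and friends directly):

```go
package agent

import (
	"testing"

	"github.com/hashicorp/consul/testutil"
	"github.com/hashicorp/serf/serf"
)

// memberLister is a stand-in used only for this sketch; the real tests
// operate on the agents returned by their own setup helpers.
type memberLister interface {
	LANMembers() []serf.Member
}

func waitForTwoMembers(t *testing.T, a memberLister) {
	// Before this change:
	//   time.Sleep(100 * time.Millisecond)
	//   if len(a.LANMembers()) != 2 {
	//       t.Fatalf("should have 2 members")
	//   }
	//
	// After: poll until the condition holds, and only fail if it never does.
	testutil.WaitForResult(func() (bool, error) {
		return len(a.LANMembers()) == 2, nil
	}, func(err error) {
		t.Fatalf("should have 2 members")
	})
}
```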

View File

@ -2,12 +2,14 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/serf/serf"
"net/http"
"os"
"testing"
"time"
"errors"
)
func TestHTTPAgentServices(t *testing.T) {
@ -167,9 +169,11 @@ func TestHTTPAgentJoin_WAN(t *testing.T) {
t.Fatalf("Err: %v", obj)
}
if len(a2.WANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(a2.WANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("should have 2 members")
}
})
}
func TestHTTPAgentForceLeave(t *testing.T) {
@ -189,9 +193,7 @@ func TestHTTPAgentForceLeave(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Shutdown, wait for detection
a2.Shutdown()
time.Sleep(500 * time.Millisecond)
// Force leave now
req, err := http.NewRequest("GET", fmt.Sprintf("/v1/agent/force-leave/%s", a2.config.NodeName), nil)
@ -207,11 +209,13 @@ func TestHTTPAgentForceLeave(t *testing.T) {
t.Fatalf("Err: %v", obj)
}
// Should be left
mem := srv.agent.LANMembers()
if mem[1].Status != serf.StatusLeft {
t.Fatalf("should have left: %v", mem)
}
testutil.WaitForResult(func() (bool, error) {
m := srv.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}, func(err error) {
t.Fatalf("member status is %v, should be left", err)
})
}
func TestHTTPAgentRegisterCheck(t *testing.T) {

View File

@ -2,6 +2,7 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"net/http"
"net/http/httptest"
@ -16,8 +17,7 @@ func TestCatalogRegister(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
req, err := http.NewRequest("GET", "/v1/catalog/register", nil)
@ -47,8 +47,7 @@ func TestCatalogDeregister(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
req, err := http.NewRequest("GET", "/v1/catalog/deregister", nil)
@ -77,9 +76,6 @@ func TestCatalogDatacenters(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for initialization
time.Sleep(10 * time.Millisecond)
obj, err := srv.CatalogDatacenters(nil, nil)
if err != nil {
t.Fatalf("err: %v", err)
@ -97,8 +93,7 @@ func TestCatalogNodes(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -106,6 +101,7 @@ func TestCatalogNodes(t *testing.T) {
Node: "foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -137,13 +133,13 @@ func TestCatalogNodes_Blocking(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.DCSpecificRequest{
Datacenter: "dc1",
}
var out structs.IndexedNodes
if err := srv.agent.RPC("Catalog.ListNodes", *args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -152,7 +148,7 @@ func TestCatalogNodes_Blocking(t *testing.T) {
// Do an update in a little while
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
time.Sleep(50 * time.Millisecond)
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
@ -178,7 +174,8 @@ func TestCatalogNodes_Blocking(t *testing.T) {
}
// Should block for a while
if time.Now().Sub(start) < 100*time.Millisecond {
if time.Now().Sub(start) < 50 * time.Millisecond {
// TODO: Failing
t.Fatalf("too fast")
}
@ -198,8 +195,7 @@ func TestCatalogServices(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -210,6 +206,7 @@ func TestCatalogServices(t *testing.T) {
Service: "api",
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -240,8 +237,7 @@ func TestCatalogServiceNodes(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -253,6 +249,7 @@ func TestCatalogServiceNodes(t *testing.T) {
Tags: []string{"a"},
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -283,8 +280,7 @@ func TestCatalogNodeServices(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -296,6 +292,7 @@ func TestCatalogNodeServices(t *testing.T) {
Tags: []string{"a"},
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)

View File

@ -31,7 +31,7 @@ func expectStatus(t *testing.T, script, status string) {
Notify: mock,
CheckID: "foo",
Script: script,
Interval: 25 * time.Millisecond,
Interval: 10 * time.Millisecond,
Logger: log.New(os.Stderr, "", log.LstdFlags),
}
check.Start()

View File

@ -2,12 +2,12 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"github.com/miekg/dns"
"os"
"strings"
"testing"
"time"
)
func makeDNSServer(t *testing.T) (string, *DNSServer) {
@ -65,8 +65,7 @@ func TestDNS_NodeLookup(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -74,6 +73,7 @@ func TestDNS_NodeLookup(t *testing.T) {
Node: "foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -129,8 +129,7 @@ func TestDNS_NodeLookup_PeriodName(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node with period in name
args := &structs.RegisterRequest{
@ -138,6 +137,7 @@ func TestDNS_NodeLookup_PeriodName(t *testing.T) {
Node: "foo.bar",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -171,8 +171,7 @@ func TestDNS_NodeLookup_AAAA(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -180,6 +179,7 @@ func TestDNS_NodeLookup_AAAA(t *testing.T) {
Node: "bar",
Address: "::4242:4242",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -213,8 +213,7 @@ func TestDNS_NodeLookup_CNAME(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -222,6 +221,7 @@ func TestDNS_NodeLookup_CNAME(t *testing.T) {
Node: "google",
Address: "www.google.com",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -256,8 +256,7 @@ func TestDNS_ServiceLookup(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -270,6 +269,7 @@ func TestDNS_ServiceLookup(t *testing.T) {
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -317,8 +317,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -331,6 +330,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -378,8 +378,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -392,6 +391,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -455,8 +455,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -469,6 +468,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -584,8 +584,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
args := &structs.RegisterRequest{
@ -603,6 +602,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
Status: structs.HealthCritical,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -648,8 +648,7 @@ func TestDNS_ServiceLookup_Randomize(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register nodes
for i := 0; i < 3*maxServiceResponses; i++ {
@ -662,6 +661,7 @@ func TestDNS_ServiceLookup_Randomize(t *testing.T) {
Port: 8000,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -712,8 +712,7 @@ func TestDNS_ServiceLookup_CNAME(t *testing.T) {
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
@ -725,6 +724,7 @@ func TestDNS_ServiceLookup_CNAME(t *testing.T) {
Port: 80,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)

View File

@ -2,12 +2,12 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
)
func TestHealthChecksInState(t *testing.T) {
@ -16,8 +16,7 @@ func TestHealthChecksInState(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
req, err := http.NewRequest("GET", "/v1/health/state/passing?dc=dc1", nil)
if err != nil {
@ -29,6 +28,8 @@ func TestHealthChecksInState(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
// TODO: Failing
assertIndex(t, resp)
// Should be 1 health check for the server
@ -44,8 +45,7 @@ func TestHealthNodeChecks(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
req, err := http.NewRequest("GET",
fmt.Sprintf("/v1/health/node/%s?dc=dc1", srv.agent.config.NodeName), nil)
@ -73,8 +73,7 @@ func TestHealthServiceChecks(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Create a service check
args := &structs.RegisterRequest{
@ -87,6 +86,7 @@ func TestHealthServiceChecks(t *testing.T) {
ServiceID: "consul",
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -107,6 +107,7 @@ func TestHealthServiceChecks(t *testing.T) {
// Should be 1 health check for consul
nodes := obj.(structs.HealthChecks)
if len(nodes) != 1 {
// TODO: Failing
t.Fatalf("bad: %v", obj)
}
}
@ -117,8 +118,7 @@ func TestHealthServiceNodes(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
req, err := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1", nil)
if err != nil {
@ -130,6 +130,8 @@ func TestHealthServiceNodes(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
// TODO: Failing
assertIndex(t, resp)
// Should be 1 health check for consul
@ -145,8 +147,7 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Create a failing service check
args := &structs.RegisterRequest{
@ -160,6 +161,7 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
Status: structs.HealthCritical,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
@ -175,6 +177,8 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
// TODO: Failing
assertIndex(t, resp)
// Should be 0 health check for consul

View File

@ -99,9 +99,6 @@ func TestContentTypeIsJSON(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
resp := httptest.NewRecorder()
handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {

View File

@ -3,13 +3,13 @@ package agent
import (
"bytes"
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
"time"
)
func TestKVSEndpoint_PUT_GET_DELETE(t *testing.T) {
@ -18,8 +18,7 @@ func TestKVSEndpoint_PUT_GET_DELETE(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
keys := []string{
"baz",
@ -94,8 +93,7 @@ func TestKVSEndpoint_Recurse(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
keys := []string{
"bar",
@ -191,8 +189,7 @@ func TestKVSEndpoint_CAS(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
{
buf := bytes.NewBuffer([]byte("test"))
@ -289,8 +286,7 @@ func TestKVSEndpoint_ListKeys(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
keys := []string{
"bar",

View File

@ -1,6 +1,7 @@
package agent
import (
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"os"
"reflect"
@ -14,8 +15,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
defer os.RemoveAll(dir)
defer agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, agent.RPC, "dc1")
// Register info
args := &structs.RegisterRequest{
@ -23,9 +23,9 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Node: agent.config.NodeName,
Address: "127.0.0.1",
}
var out struct{}
// Exists both, same (noop)
var out struct{}
srv1 := &structs.NodeService{
ID: "mysql",
Service: "mysql",
@ -137,8 +137,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
defer os.RemoveAll(dir)
defer agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, agent.RPC, "dc1")
// Register info
args := &structs.RegisterRequest{
@ -146,9 +145,9 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
Node: agent.config.NodeName,
Address: "127.0.0.1",
}
var out struct{}
// Exists both, same (noop)
var out struct{}
chk1 := &structs.HealthCheck{
Node: agent.config.NodeName,
CheckID: "mysql",

View File

@ -3,12 +3,13 @@ package agent
import (
"fmt"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/serf/testutil"
"github.com/hashicorp/consul/testutil"
"io"
"net"
"os"
"strings"
"testing"
"errors"
"time"
)
@ -59,35 +60,32 @@ func TestRPCClientForceLeave(t *testing.T) {
p2 := testRPCClient(t)
defer p1.Close()
defer p2.Close()
testutil.Yield()
s2Addr := fmt.Sprintf("127.0.0.1:%d", p2.agent.config.Ports.SerfLan)
if _, err := p1.agent.JoinLAN([]string{s2Addr}); err != nil {
t.Fatalf("err: %s", err)
}
testutil.Yield()
if err := p2.agent.Shutdown(); err != nil {
t.Fatalf("err: %s", err)
}
time.Sleep(time.Second)
if err := p1.client.ForceLeave(p2.agent.config.NodeName); err != nil {
t.Fatalf("err: %s", err)
}
testutil.Yield()
m := p1.agent.LANMembers()
if len(m) != 2 {
t.Fatalf("should have 2 members: %#v", m)
}
if m[1].Status != serf.StatusLeft {
t.Fatalf("should be left: %#v %v", m[1], m[1].Status == serf.StatusLeft)
}
testutil.WaitForResult(func() (bool, error) {
m := p1.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}, func(err error) {
t.Fatalf("member status is %v, should be left", err)
})
}
func TestRPCClientJoinLAN(t *testing.T) {
@ -95,7 +93,6 @@ func TestRPCClientJoinLAN(t *testing.T) {
p2 := testRPCClient(t)
defer p1.Close()
defer p2.Close()
testutil.Yield()
s2Addr := fmt.Sprintf("127.0.0.1:%d", p2.agent.config.Ports.SerfLan)
n, err := p1.client.Join([]string{s2Addr}, false)
@ -113,7 +110,6 @@ func TestRPCClientJoinWAN(t *testing.T) {
p2 := testRPCClient(t)
defer p1.Close()
defer p2.Close()
testutil.Yield()
s2Addr := fmt.Sprintf("127.0.0.1:%d", p2.agent.config.Ports.SerfWan)
n, err := p1.client.Join([]string{s2Addr}, true)
@ -131,7 +127,6 @@ func TestRPCClientLANMembers(t *testing.T) {
p2 := testRPCClient(t)
defer p1.Close()
defer p2.Close()
testutil.Yield()
mem, err := p1.client.LANMembers()
if err != nil {
@ -148,8 +143,6 @@ func TestRPCClientLANMembers(t *testing.T) {
t.Fatalf("err: %s", err)
}
testutil.Yield()
mem, err = p1.client.LANMembers()
if err != nil {
t.Fatalf("err: %s", err)
@ -165,7 +158,6 @@ func TestRPCClientWANMembers(t *testing.T) {
p2 := testRPCClient(t)
defer p1.Close()
defer p2.Close()
testutil.Yield()
mem, err := p1.client.WANMembers()
if err != nil {
@ -182,8 +174,6 @@ func TestRPCClientWANMembers(t *testing.T) {
t.Fatalf("err: %s", err)
}
testutil.Yield()
mem, err = p1.client.WANMembers()
if err != nil {
t.Fatalf("err: %s", err)
@ -194,16 +184,33 @@ func TestRPCClientWANMembers(t *testing.T) {
}
}
func TestRPCClientStats(t *testing.T) {
p1 := testRPCClient(t)
defer p1.Close()
stats, err := p1.client.Stats()
if err != nil {
t.Fatalf("err: %s", err)
}
if _, ok := stats["agent"]; !ok {
t.Fatalf("bad: %#v", stats)
}
if _, ok := stats["consul"]; !ok {
t.Fatalf("bad: %#v", stats)
}
}
func TestRPCClientLeave(t *testing.T) {
p1 := testRPCClient(t)
defer p1.Close()
testutil.Yield()
if err := p1.client.Leave(); err != nil {
t.Fatalf("err: %s", err)
}
testutil.Yield()
time.Sleep(1 * time.Second)
select {
case <-p1.agent.ShutdownCh():
@ -215,7 +222,6 @@ func TestRPCClientLeave(t *testing.T) {
func TestRPCClientMonitor(t *testing.T) {
p1 := testRPCClient(t)
defer p1.Close()
testutil.Yield()
eventCh := make(chan string, 64)
if handle, err := p1.client.Monitor("debug", eventCh); err != nil {
@ -224,8 +230,6 @@ func TestRPCClientMonitor(t *testing.T) {
defer p1.client.Stop(handle)
}
testutil.Yield()
found := false
OUTER1:
for {
@ -239,12 +243,14 @@ OUTER1:
}
}
if !found {
// TODO: Failing
t.Fatalf("should log client accept")
}
// Join a bad thing to generate more events
p1.agent.JoinLAN(nil)
testutil.Yield()
time.Sleep(1 * time.Second)
found = false
OUTER2:
@ -262,22 +268,3 @@ OUTER2:
t.Fatalf("should log joining")
}
}
func TestRPCClientStats(t *testing.T) {
p1 := testRPCClient(t)
defer p1.Close()
testutil.Yield()
stats, err := p1.client.Stats()
if err != nil {
t.Fatalf("err: %s", err)
}
if _, ok := stats["agent"]; !ok {
t.Fatalf("bad: %#v", stats)
}
if _, ok := stats["consul"]; !ok {
t.Fatalf("bad: %#v", stats)
}
}

View File

@ -3,7 +3,7 @@ package agent
import (
"os"
"testing"
"time"
"github.com/hashicorp/consul/testutil"
)
func TestStatusLeader(t *testing.T) {
@ -12,8 +12,7 @@ func TestStatusLeader(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
obj, err := srv.StatusLeader(nil, nil)
if err != nil {

View File

@ -3,6 +3,7 @@ package agent
import (
"bytes"
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"io"
"io/ioutil"
@ -12,7 +13,6 @@ import (
"path/filepath"
"reflect"
"testing"
"time"
)
func TestUiIndex(t *testing.T) {
@ -60,8 +60,7 @@ func TestUiNodes(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
req, err := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1", nil)
if err != nil {
@ -88,8 +87,7 @@ func TestUiNodeInfo(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
req, err := http.NewRequest("GET",
fmt.Sprintf("/v1/internal/ui/node/%s", srv.agent.config.NodeName), nil)
@ -102,6 +100,8 @@ func TestUiNodeInfo(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
// TODO: Failing
assertIndex(t, resp)
// Should be 1 node for the server

View File

@ -2,12 +2,12 @@ package command
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/serf/testutil"
"github.com/mitchellh/cli"
"strings"
"testing"
"time"
"errors"
)
func TestForceLeaveCommand_implements(t *testing.T) {
@ -26,13 +26,9 @@ func TestForceLeaveCommandRun(t *testing.T) {
t.Fatalf("err: %s", err)
}
testutil.Yield()
// Forcibly shutdown a2 so that it appears "failed" in a1
a2.Shutdown()
time.Sleep(time.Second)
ui := new(cli.MockUi)
c := &ForceLeaveCommand{Ui: ui}
args := []string{
@ -50,9 +46,13 @@ func TestForceLeaveCommandRun(t *testing.T) {
t.Fatalf("should have 2 members: %#v", m)
}
if m[1].Status != serf.StatusLeft {
t.Fatalf("should be left: %#v", m[1])
}
testutil.WaitForResult(func() (bool, error) {
m = a1.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}, func(err error) {
t.Fatalf("member status is %v, should be left", err)
})
}
func TestForceLeaveCommandRun_noAddrs(t *testing.T) {

View File

@ -3,6 +3,7 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"net/rpc"
"os"
"sort"
@ -35,12 +36,12 @@ func TestCatalogRegister(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
testutil.WaitForResult(func() (bool, error) {
err := client.Call("Catalog.Register", &arg, &out)
return err == nil, err
}, func(err error) {
t.Fatalf("err: %v", err)
}
})
}
func TestCatalogRegister_ForwardLeader(t *testing.T) {
@ -63,8 +64,8 @@ func TestCatalogRegister_ForwardLeader(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client1.Call, "dc1")
testutil.WaitForLeader(t, client2.Call, "dc1")
// Use the follower as the client
var client *rpc.Client
@ -108,8 +109,7 @@ func TestCatalogRegister_ForwardDC(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for the leaders
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc2")
arg := structs.RegisterRequest{
Datacenter: "dc2", // Should forward through s1
@ -145,8 +145,7 @@ func TestCatalogDeregister(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
if err := client.Call("Catalog.Deregister", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
@ -170,7 +169,8 @@ func TestCatalogListDatacenters(t *testing.T) {
if _, err := s2.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
time.Sleep(10 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
var out []string
if err := client.Call("Catalog.ListDatacenters", struct{}{}, &out); err != nil {
@ -207,19 +207,17 @@ func TestCatalogListNodes(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
if err := client.Call("Catalog.ListNodes", &args, &out); err != nil {
testutil.WaitForResult(func() (bool, error) {
client.Call("Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 2, nil
}, func(err error) {
t.Fatalf("err: %v", err)
}
if len(out.Nodes) != 2 {
t.Fatalf("bad: %v", out)
}
})
// Server node is auto added from Serf
if out.Nodes[0].Node != s1.config.NodeName {
@ -253,8 +251,8 @@ func TestCatalogListNodes_StaleRaad(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client1.Call, "dc1")
testutil.WaitForLeader(t, client2.Call, "dc1")
// Use the follower as the client
var client *rpc.Client
@ -317,8 +315,8 @@ func TestCatalogListNodes_ConsistentRead_Fail(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client1.Call, "dc1")
testutil.WaitForLeader(t, client2.Call, "dc1")
// Use the leader as the client, kill the follower
var client *rpc.Client
@ -367,8 +365,8 @@ func TestCatalogListNodes_ConsistentRead(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client1.Call, "dc1")
testutil.WaitForLeader(t, client2.Call, "dc1")
// Use the leader as the client, kill the follower
var client *rpc.Client
@ -402,9 +400,6 @@ func BenchmarkCatalogListNodes(t *testing.B) {
client := rpcClient(nil, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
@ -435,8 +430,7 @@ func TestCatalogListServices(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
@ -473,8 +467,7 @@ func TestCatalogListServices_Blocking(t *testing.T) {
}
var out structs.IndexedServices
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Run the query
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
@ -500,7 +493,7 @@ func TestCatalogListServices_Blocking(t *testing.T) {
}
// Should block at least 100ms
if time.Now().Sub(start) < 100*time.Millisecond {
if time.Now().Sub(start) < 100 * time.Millisecond {
t.Fatalf("too fast")
}
@ -527,8 +520,7 @@ func TestCatalogListServices_Timeout(t *testing.T) {
}
var out structs.IndexedServices
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Run the query
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
@ -547,7 +539,8 @@ func TestCatalogListServices_Timeout(t *testing.T) {
}
// Should block at least 100ms
if time.Now().Sub(start) < 100*time.Millisecond {
if time.Now().Sub(start) < 100 * time.Millisecond {
// TODO: Failing
t.Fatalf("too fast")
}
@ -609,8 +602,7 @@ func TestCatalogListServiceNodes(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
@ -653,8 +645,7 @@ func TestCatalogNodeServices(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
@ -705,8 +696,7 @@ func TestCatalogRegister_FailedCase1(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)

View File

@ -2,6 +2,7 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"net"
"os"
@ -80,12 +81,12 @@ func TestClient_JoinLAN(t *testing.T) {
t.Fatalf("bad len")
}
time.Sleep(10 * time.Millisecond)
// Check we have a new consul
if len(c1.consuls) != 1 {
testutil.WaitForResult(func() (bool, error) {
return len(c1.consuls) == 1, nil
}, func(err error) {
t.Fatalf("expected consul server")
}
})
}
func TestClient_RPC(t *testing.T) {
@ -119,12 +120,13 @@ func TestClient_RPC(t *testing.T) {
t.Fatalf("bad len")
}
time.Sleep(10 * time.Millisecond)
// RPC shoudl succeed
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
// RPC should succeed
testutil.WaitForResult(func() (bool, error) {
err := c1.RPC("Status.Ping", struct{}{}, &out)
return err == nil, err
}, func(err error) {
t.Fatalf("err: %v", err)
}
})
}
func TestClient_RPC_TLS(t *testing.T) {
@ -171,10 +173,11 @@ func TestClient_RPC_TLS(t *testing.T) {
t.Fatalf("bad len")
}
time.Sleep(10 * time.Millisecond)
// RPC shoudl succeed
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
// RPC should succeed
testutil.WaitForResult(func() (bool, error) {
err := c1.RPC("Status.Ping", struct{}{}, &out)
return err == nil, err
}, func(err error) {
t.Fatalf("err: %v", err)
}
})
}

View File

@ -1,10 +1,10 @@
package consul
import (
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"os"
"testing"
"time"
)
func TestHealth_ChecksInState(t *testing.T) {
@ -14,8 +14,7 @@ func TestHealth_ChecksInState(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
@ -61,8 +60,7 @@ func TestHealth_NodeChecks(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
@ -103,8 +101,7 @@ func TestHealth_ServiceChecks(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
@ -150,8 +147,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",

View File

@ -1,10 +1,10 @@
package consul
import (
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"os"
"testing"
"time"
)
func TestInternal_NodeInfo(t *testing.T) {
@ -14,8 +14,7 @@ func TestInternal_NodeInfo(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",
@ -68,8 +67,7 @@ func TestInternal_NodeDump(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.RegisterRequest{
Datacenter: "dc1",

View File

@ -1,10 +1,10 @@
package consul
import (
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"os"
"testing"
"time"
)
func TestKVS_Apply(t *testing.T) {
@ -14,8 +14,7 @@ func TestKVS_Apply(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.KVSRequest{
Datacenter: "dc1",
@ -71,8 +70,7 @@ func TestKVS_Get(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
arg := structs.KVSRequest{
Datacenter: "dc1",
@ -119,8 +117,7 @@ func TestKVSEndpoint_List(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
keys := []string{
"/test/key1",
@ -179,8 +176,7 @@ func TestKVSEndpoint_ListKeys(t *testing.T) {
client := rpcClient(t, s1)
defer client.Close()
// Wait for leader
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
keys := []string{
"/test/key1",

View File

@ -2,10 +2,12 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/serf/serf"
"os"
"testing"
"errors"
"time"
)
@ -18,9 +20,6 @@ func TestLeader_RegisterMember(t *testing.T) {
defer os.RemoveAll(dir2)
defer c1.Shutdown()
// Wait until we have a leader
time.Sleep(100 * time.Millisecond)
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
@ -28,15 +27,17 @@ func TestLeader_RegisterMember(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for registration
time.Sleep(10 * time.Millisecond)
client := rpcClient(t, s1)
testutil.WaitForLeader(t, client.Call, "dc1")
// Client should be registered
state := s1.fsm.State()
_, found, _ := state.GetNode(c1.config.NodeName)
if !found {
testutil.WaitForResult(func() (bool, error) {
_, found, _ := state.GetNode(c1.config.NodeName)
return found == true, nil
}, func(err error) {
t.Fatalf("client not registered")
}
})
// Should have a check
_, checks := state.NodeChecks(c1.config.NodeName)
@ -54,7 +55,7 @@ func TestLeader_RegisterMember(t *testing.T) {
}
// Server should be registered
_, found, _ = state.GetNode(s1.config.NodeName)
_, found, _ := state.GetNode(s1.config.NodeName)
if !found {
t.Fatalf("server not registered")
}
@ -75,8 +76,8 @@ func TestLeader_FailedMember(t *testing.T) {
defer os.RemoveAll(dir2)
defer c1.Shutdown()
// Wait until we have a leader
time.Sleep(100 * time.Millisecond)
client := rpcClient(t, s1)
testutil.WaitForLeader(t, client.Call, "dc1")
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
@ -88,15 +89,14 @@ func TestLeader_FailedMember(t *testing.T) {
// Fail the member
c1.Shutdown()
// Wait for failure detection
time.Sleep(500 * time.Millisecond)
// Should be registered
state := s1.fsm.State()
_, found, _ := state.GetNode(c1.config.NodeName)
if !found {
testutil.WaitForResult(func() (bool, error) {
_, found, _ := state.GetNode(c1.config.NodeName)
return found == true, nil
}, func(err error) {
t.Fatalf("client not registered")
}
})
// Should have a check
_, checks := state.NodeChecks(c1.config.NodeName)
@ -109,9 +109,13 @@ func TestLeader_FailedMember(t *testing.T) {
if checks[0].Name != SerfCheckName {
t.Fatalf("bad check: %v", checks[0])
}
if checks[0].Status != structs.HealthCritical {
t.Fatalf("bad check: %v", checks[0])
}
testutil.WaitForResult(func() (bool, error) {
_, checks = state.NodeChecks(c1.config.NodeName)
return checks[0].Status == structs.HealthCritical, errors.New(checks[0].Status)
}, func(err error) {
t.Fatalf("check status is %v, should be critical", err)
})
}
func TestLeader_LeftMember(t *testing.T) {
@ -123,9 +127,6 @@ func TestLeader_LeftMember(t *testing.T) {
defer os.RemoveAll(dir2)
defer c1.Shutdown()
// Wait until we have a leader
time.Sleep(100 * time.Millisecond)
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
@ -133,28 +134,28 @@ func TestLeader_LeftMember(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for registration
time.Sleep(10 * time.Millisecond)
var found bool
state := s1.fsm.State()
// Should be registered
state := s1.fsm.State()
_, found, _ := state.GetNode(c1.config.NodeName)
if !found {
t.Fatalf("client not registered")
}
testutil.WaitForResult(func() (bool, error) {
_, found, _ = state.GetNode(c1.config.NodeName)
return found == true, nil
}, func(err error) {
t.Fatalf("client should be registered")
})
// Node should leave
c1.Leave()
c1.Shutdown()
// Wait for failure detection
time.Sleep(500 * time.Millisecond)
// Should be deregistered
_, found, _ = state.GetNode(c1.config.NodeName)
if found {
t.Fatalf("client registered")
}
testutil.WaitForResult(func() (bool, error) {
_, found, _ = state.GetNode(c1.config.NodeName)
return found == false, nil
}, func(err error) {
t.Fatalf("client should not be registered")
})
}
func TestLeader_ReapMember(t *testing.T) {
@ -166,9 +167,6 @@ func TestLeader_ReapMember(t *testing.T) {
defer os.RemoveAll(dir2)
defer c1.Shutdown()
// Wait until we have a leader
time.Sleep(100 * time.Millisecond)
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
@ -176,15 +174,16 @@ func TestLeader_ReapMember(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait for registration
time.Sleep(10 * time.Millisecond)
var found bool
state := s1.fsm.State()
// Should be registered
state := s1.fsm.State()
_, found, _ := state.GetNode(c1.config.NodeName)
if !found {
t.Fatalf("client not registered")
}
testutil.WaitForResult(func() (bool, error) {
_, found, _ = state.GetNode(c1.config.NodeName)
return found == true, nil
}, func(err error) {
t.Fatalf("client should be registered")
})
// Simulate a node reaping
mems := s1.LANMembers()
@ -198,14 +197,13 @@ func TestLeader_ReapMember(t *testing.T) {
}
s1.reconcileCh <- c1mem
// Wait to reconcile
time.Sleep(10 * time.Millisecond)
// Should be deregistered
_, found, _ = state.GetNode(c1.config.NodeName)
if found {
t.Fatalf("client registered")
}
testutil.WaitForResult(func() (bool, error) {
_, found, _ = state.GetNode(c1.config.NodeName)
return found == false, nil
}, func(err error) {
t.Fatalf("client should not be registered")
})
}
func TestLeader_Reconcile_ReapMember(t *testing.T) {
@ -213,8 +211,8 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) {
defer os.RemoveAll(dir1)
defer s1.Shutdown()
// Wait until we have a leader
time.Sleep(100 * time.Millisecond)
client := rpcClient(t, s1)
testutil.WaitForLeader(t, client.Call, "dc1")
// Register a non-existing member
dead := structs.RegisterRequest{
@ -269,14 +267,13 @@ func TestLeader_Reconcile(t *testing.T) {
t.Fatalf("client registered")
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Should be registered
_, found, _ = state.GetNode(c1.config.NodeName)
if !found {
t.Fatalf("client not registered")
}
testutil.WaitForResult(func() (bool, error) {
_, found, _ = state.GetNode(c1.config.NodeName)
return found == true, nil
}, func(err error) {
t.Fatalf("client should be registered")
})
}
func TestLeader_LeftServer(t *testing.T) {
@ -303,48 +300,31 @@ func TestLeader_LeftServer(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait until we have 3 peers
start := time.Now()
CHECK1:
for _, s := range servers {
peers, _ := s.raftPeers.Peers()
if len(peers) != 3 {
if time.Now().Sub(start) >= 2*time.Second {
t.Fatalf("should have 3 peers")
} else {
time.Sleep(100 * time.Millisecond)
goto CHECK1
}
}
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.raftPeers.Peers()
return len(peers) == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
// Kill any server
servers[0].Shutdown()
// Wait for failure detection
time.Sleep(500 * time.Millisecond)
time.Sleep(100 * time.Millisecond)
// Force remove the non-leader (transition to left state)
if err := servers[1].RemoveFailedNode(servers[0].config.NodeName); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for intent propagation
time.Sleep(500 * time.Millisecond)
// Wait until we have 2 peers
start = time.Now()
CHECK2:
for _, s := range servers[1:] {
peers, _ := s.raftPeers.Peers()
if len(peers) != 2 {
if time.Now().Sub(start) >= 2*time.Second {
t.Fatalf("should have 2 peers")
} else {
time.Sleep(100 * time.Millisecond)
goto CHECK2
}
}
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.raftPeers.Peers()
return len(peers) == 2, nil
}, func(err error) {
t.Fatalf("should have 2 peers")
})
}
}
@ -366,24 +346,15 @@ func TestLeader_MultiBootstrap(t *testing.T) {
t.Fatalf("err: %v", err)
}
// Wait until we have 2 peers
start := time.Now()
CHECK1:
for _, s := range servers {
peers := s.serfLAN.Members()
if len(peers) != 2 {
if time.Now().Sub(start) >= 2*time.Second {
t.Fatalf("should have 2 peers")
} else {
time.Sleep(100 * time.Millisecond)
goto CHECK1
}
}
testutil.WaitForResult(func() (bool, error) {
peers := s.serfLAN.Members()
return len(peers) == 2, nil
}, func(err error) {
t.Fatalf("should have 2 peers")
})
}
// Wait to ensure no peer is added
time.Sleep(200 * time.Millisecond)
// Ensure we don't have multiple raft peers
for _, s := range servers {
peers, _ := s.raftPeers.Peers()

View File

@ -2,11 +2,13 @@ package consul
import (
"fmt"
"github.com/hashicorp/consul/testutil"
"io/ioutil"
"net"
"os"
"testing"
"time"
"errors"
)
var nextPort = 15000
@ -134,13 +136,17 @@ func TestServer_JoinLAN(t *testing.T) {
}
// Check the members
if len(s1.LANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s1.LANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
})
if len(s2.LANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s2.LANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
})
}
func TestServer_JoinWAN(t *testing.T) {
@ -160,24 +166,28 @@ func TestServer_JoinWAN(t *testing.T) {
}
// Check the members
if len(s1.WANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
})
if len(s2.WANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s2.WANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
time.Sleep(10 * time.Millisecond)
})
// Check the remoteConsuls has both
if len(s1.remoteConsuls) != 2 {
t.Fatalf("remote consul missing")
}
if len(s2.remoteConsuls) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s2.remoteConsuls) == 2, nil
}, func(err error) {
t.Fatalf("remote consul missing")
}
})
}
func TestServer_Leave(t *testing.T) {
@ -197,17 +207,22 @@ func TestServer_Leave(t *testing.T) {
t.Fatalf("err: %v", err)
}
time.Sleep(time.Second)
var p1 []net.Addr
var p2 []net.Addr
p1, _ := s1.raftPeers.Peers()
if len(p1) != 2 {
t.Fatalf("should have 2 peers: %v", p1)
}
testutil.WaitForResult(func() (bool, error) {
p1, _ = s1.raftPeers.Peers()
return len(p1) == 2, errors.New(fmt.Sprintf("%v", p1))
}, func(err error) {
t.Fatalf("should have 2 peers: %v", err)
})
p2, _ := s2.raftPeers.Peers()
if len(p2) != 2 {
t.Fatalf("should have 2 peers: %v", p2)
}
testutil.WaitForResult(func() (bool, error) {
p2, _ = s2.raftPeers.Peers()
return len(p2) == 2, errors.New(fmt.Sprintf("%v", p2))
}, func(err error) {
t.Fatalf("should have 2 peers: %v", err)
})
// Issue a leave
if err := s2.Leave(); err != nil {
@ -264,22 +279,28 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
}
// Check the members
if len(s1.LANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s1.LANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
})
if len(s2.LANMembers()) != 2 {
testutil.WaitForResult(func() (bool, error) {
return len(s2.LANMembers()) == 2, nil
}, func(err error) {
t.Fatalf("bad len")
}
// Wait a while
time.Sleep(100 * time.Millisecond)
})
// Verify Raft has established a peer
if s1.Stats()["raft"]["num_peers"] != "1" {
t.Fatalf("bad: %v", s1.Stats()["raft"])
}
if s2.Stats()["raft"]["num_peers"] != "1" {
t.Fatalf("bad: %v", s2.Stats()["raft"])
}
testutil.WaitForResult(func() (bool, error) {
return s1.Stats()["raft"]["num_peers"] == "1", nil
}, func(err error) {
t.Fatalf("no peer established")
})
testutil.WaitForResult(func() (bool, error) {
return s2.Stats()["raft"]["num_peers"] == "1", nil
}, func(err error) {
t.Fatalf("no peer established")
})
}

View File

@ -1,6 +1,7 @@
package consul
import (
"github.com/hashicorp/consul/testutil"
"github.com/ugorji/go/codec"
"net"
"net/rpc"
@ -38,7 +39,7 @@ func TestStatusLeader(t *testing.T) {
t.Fatalf("unexpected leader: %v", leader)
}
time.Sleep(100 * time.Millisecond)
testutil.WaitForLeader(t, client.Call, "dc1")
if err := client.Call("Status.Leader", arg, &leader); err != nil {
t.Fatalf("err: %v", err)

testutil/wait.go (new file, 44 lines)
View File

@ -0,0 +1,44 @@
package testutil

import (
	"time"
	"testing"

	"github.com/hashicorp/consul/consul/structs"
)

type testFn func() (bool, error)
type errorFn func(error)

// WaitForResult polls test every 10ms, up to 1000 times (roughly 10 seconds).
// If the condition never becomes true, the error callback is invoked with the
// last error returned by test.
func WaitForResult(test testFn, error errorFn) {
	retries := 1000

	for retries > 0 {
		time.Sleep(10 * time.Millisecond)
		retries--

		success, err := test()
		if success {
			return
		}

		if retries == 0 {
			error(err)
		}
	}
}

type rpcFn func(string, interface{}, interface{}) error

// WaitForLeader polls Catalog.ListNodes through the given RPC function until
// the datacenter reports a known leader, failing the test otherwise. The
// ListNodes response is returned for reuse.
func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes {
	var out structs.IndexedNodes
	WaitForResult(func() (bool, error) {
		args := &structs.RegisterRequest{
			Datacenter: dc,
		}
		err := rpc("Catalog.ListNodes", args, &out)
		return out.QueryMeta.KnownLeader, err
	}, func(err error) {
		t.Fatalf("failed to find leader: %v", err)
	})
	return out
}
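
The `rpcFn` type is deliberately loose (`func(string, interface{}, interface{}) error`), so the agent tests can pass `srv.agent.RPC` while the consul package tests pass an `*rpc.Client`'s `Call` method; both call sites appear above. A hedged sketch of that contract, using a fake RPC endpoint invented here purely for illustration:

```go
package testutil_test

import (
	"testing"

	"github.com/hashicorp/consul/consul/structs"
	"github.com/hashicorp/consul/testutil"
)

// fakeRPC is a stand-in invented for this sketch; it pretends the catalog
// already knows a leader. Real tests pass srv.agent.RPC or client.Call.
func fakeRPC(method string, args interface{}, reply interface{}) error {
	if out, ok := reply.(*structs.IndexedNodes); ok {
		out.QueryMeta.KnownLeader = true
	}
	return nil
}

func TestWaitForLeaderContract(t *testing.T) {
	// WaitForLeader returns the Catalog.ListNodes response once KnownLeader is true.
	out := testutil.WaitForLeader(t, fakeRPC, "dc1")
	if !out.QueryMeta.KnownLeader {
		t.Fatalf("expected a known leader")
	}
}
```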

testutil/wait_test.go (new file, 1 line)
View File

@ -0,0 +1 @@
package testutil
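
The new wait_test.go is only a package stub in this commit. A minimal test exercising `WaitForResult` might look like the following (hypothetical, not part of the change):

```go
package testutil

import (
	"errors"
	"testing"
	"time"
)

// Hypothetical example: a condition that becomes true after a short delay
// should cause WaitForResult to return without firing the failure callback.
func TestWaitForResult_EventuallySucceeds(t *testing.T) {
	ready := time.Now().Add(50 * time.Millisecond)

	WaitForResult(func() (bool, error) {
		if time.Now().After(ready) {
			return true, nil
		}
		return false, errors.New("not ready yet")
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
```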