Merge pull request #5376 from hashicorp/fix-tests
Fix tests in prep for CircleCI Migration
commit aacb81a566
@@ -133,6 +133,7 @@ func TestAgent_RPCPing(t *testing.T) {
     t.Parallel()
     a := NewTestAgent(t, t.Name(), "")
     defer a.Shutdown()
+    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

     var out struct{}
     if err := a.RPC("Status.Ping", struct{}{}, &out); err != nil {

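Most of the agent-side hunks in this commit follow the shape of the one above: start the test agent, then block on readiness before issuing the first RPC. As a standalone illustration only (a sketch, assuming it lives in the same agent package as the patched tests, with "testing" and "github.com/hashicorp/consul/testrpc" imported; the _sketch name is hypothetical):

// Sketch, not part of the commit.
func TestAgent_RPCPing_sketch(t *testing.T) {
    t.Parallel()
    a := NewTestAgent(t, t.Name(), "")
    defer a.Shutdown()

    // Block until the test agent is registered and serving in dc1; without
    // this the Status.Ping below can race agent startup on a slow CI worker.
    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

    var out struct{}
    if err := a.RPC("Status.Ping", struct{}{}, &out); err != nil {
        t.Fatalf("err: %v", err)
    }
}
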
@@ -517,6 +517,9 @@ func TestCheckTCPPassing(t *testing.T) {
     if os.Getenv("TRAVIS") == "true" {
         t.Skip("IPV6 not supported on travis-ci")
     }
+    if os.Getenv("CIRCLECI") == "true" {
+        t.Skip("IPV6 not supported on CircleCI")
+    }
     tcpServer = mockTCPServer(`tcp6`)
     expectTCPStatus(t, tcpServer.Addr().String(), api.HealthPassing)
     tcpServer.Close()

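If more CI providers ever need the same treatment, the two environment checks above could be folded into one helper. A hypothetical sketch (skipIfNoIPv6OnCI is not part of this commit or of Consul's helpers; assumes "os" and "testing" are imported):

// Hypothetical helper, shown only to illustrate the skip pattern above.
func skipIfNoIPv6OnCI(t *testing.T) {
    t.Helper()
    for _, env := range []string{"TRAVIS", "CIRCLECI"} {
        if os.Getenv(env) == "true" {
            t.Skipf("IPV6 not supported on %s", env)
        }
    }
}
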
@@ -84,8 +84,10 @@ func TestClient_JoinLAN(t *testing.T) {
     defer os.RemoveAll(dir2)
     defer c1.Shutdown()

+    testrpc.WaitForLeader(t, s1.RPC, "dc1")
     // Try to join
     joinLAN(t, c1, s1)
+    testrpc.WaitForTestAgent(t, c1.RPC, "dc1")
     retry.Run(t, func(r *retry.R) {
         if got, want := c1.routers.NumServers(), 1; got != want {
             r.Fatalf("got %d servers want %d", got, want)

@@ -162,10 +162,11 @@ func TestResetSessionTimerLocked(t *testing.T) {
         t.Fatalf("missing timer")
     }

-    time.Sleep(10 * time.Millisecond * structs.SessionTTLMultiplier)
-    if s1.sessionTimers.Get("foo") != nil {
-        t.Fatalf("timer should be gone")
-    }
+    retry.Run(t, func(r *retry.R) {
+        if s1.sessionTimers.Get("foo") != nil {
+            r.Fatal("timer should be gone")
+        }
+    })
 }

 func TestResetSessionTimerLocked_Renew(t *testing.T) {

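The same sleep-to-poll conversion also works when the default retryer is too short or too eager: testutil/retry exposes RunWith with an explicit Timer, which the agent metrics hunk further down already uses. A sketch against the sessionTimers field from this test (the 2s/25ms bounds are illustrative, not taken from the commit):

// Sketch only: poll until the session timer is reaped instead of sleeping
// a fixed multiple of the TTL.
timer := &retry.Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond}
retry.RunWith(timer, t, func(r *retry.R) {
    if s1.sessionTimers.Get("foo") != nil {
        r.Fatal("timer should be gone")
    }
})
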
@@ -4151,7 +4151,7 @@ func testDNSServiceLookupResponseLimits(t *testing.T, answerLimit int, qType uin
     }
     `)
     defer a.Shutdown()
-    testrpc.WaitForLeader(t, a.RPC, "dc1")
+    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

     for i := 0; i < generateNumNodes; i++ {
         nodeAddress := fmt.Sprintf("127.0.0.%d", i+1)

@@ -4241,7 +4241,7 @@ func checkDNSService(t *testing.T, generateNumNodes int, aRecordLimit int, qType
     }
     `)
     defer a.Shutdown()
-    testrpc.WaitForLeader(t, a.RPC, "dc1")
+    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

     for i := 0; i < generateNumNodes; i++ {
         nodeAddress := fmt.Sprintf("127.0.0.%d", i+1)

@@ -1733,18 +1733,18 @@ func TestAgent_CheckCriticalTime(t *testing.T) {
     if c, ok := l.CriticalCheckStates()[checkID]; !ok {
         t.Fatalf("should have a critical check")
     } else if c.CriticalFor() > time.Millisecond {
-        t.Fatalf("bad: %#v", c)
+        t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
     }

     // Wait a while, then fail it again and make sure the time keeps track
-    // of the initial failure, and doesn't reset here.
+    // of the initial failure, and doesn't reset here. Since we are sleeping for
+    // 50ms the check should not be any less than that.
     time.Sleep(50 * time.Millisecond)
     l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
     if c, ok := l.CriticalCheckStates()[checkID]; !ok {
         t.Fatalf("should have a critical check")
-    } else if c.CriticalFor() < 25*time.Millisecond ||
-        c.CriticalFor() > 75*time.Millisecond {
-        t.Fatalf("bad: %#v", c)
+    } else if c.CriticalFor() < 50*time.Millisecond {
+        t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
     }

     // Set it passing again.

@@ -1759,7 +1759,7 @@ func TestAgent_CheckCriticalTime(t *testing.T) {
     if c, ok := l.CriticalCheckStates()[checkID]; !ok {
         t.Fatalf("should have a critical check")
     } else if c.CriticalFor() > time.Millisecond {
-        t.Fatalf("bad: %#v", c)
+        t.Fatalf("bad: %#v, check was critical for %v", c, c.CriticalFor())
     }
 }

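The relaxed assertion in the first of these two hunks drops the 75ms upper bound because elapsed-time ceilings are exactly what flakes on a shared CI worker; only the floor is guaranteed by the sleep. In isolation the reasoning looks like this (sketch, not from the commit):

start := time.Now()
time.Sleep(50 * time.Millisecond)
elapsed := time.Since(start)
// The scheduler may stretch the sleep well past 50ms on a busy runner, so
// asserting an upper bound is inherently racy; the lower bound always holds.
if elapsed < 50*time.Millisecond {
    t.Fatalf("expected at least 50ms to elapse, got %v", elapsed)
}
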
@@ -346,6 +346,7 @@ func TestOperator_AutopilotCASConfiguration(t *testing.T) {
     t.Parallel()
     a := NewTestAgent(t, t.Name(), "")
     defer a.Shutdown()
+    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

     body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`))
     req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body)

@@ -9,14 +9,16 @@ import (
     "testing"

     "github.com/hashicorp/consul/acl"
+    "github.com/hashicorp/consul/testrpc"
 )

 func TestSnapshot(t *testing.T) {
     t.Parallel()
     var snap io.Reader
-    t.Run("", func(t *testing.T) {
+    t.Run("create snapshot", func(t *testing.T) {
         a := NewTestAgent(t, t.Name(), "")
         defer a.Shutdown()
+        testrpc.WaitForTestAgent(t, a.RPC, "dc1")

         body := bytes.NewBuffer(nil)
         req, _ := http.NewRequest("GET", "/v1/snapshot?token=root", body)

@@ -40,9 +42,10 @@ func TestSnapshot(t *testing.T) {
         }
     })

-    t.Run("", func(t *testing.T) {
+    t.Run("restore snapshot", func(t *testing.T) {
         a := NewTestAgent(t, t.Name(), "")
         defer a.Shutdown()
+        testrpc.WaitForTestAgent(t, a.RPC, "dc1")

         req, _ := http.NewRequest("PUT", "/v1/snapshot?token=root", snap)
         resp := httptest.NewRecorder()

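Naming the two subtests also documents that they share state: the io.Reader written by the first t.Run is consumed by the second. A compressed sketch of that shape (placeholder body, assuming "bytes", "io", and "testing" imports; not part of the commit):

func TestSnapshot_shape(t *testing.T) {
    var snap io.Reader
    t.Run("create snapshot", func(t *testing.T) {
        // The real test streams the body of GET /v1/snapshot into snap.
        snap = bytes.NewBufferString("placeholder")
    })
    t.Run("restore snapshot", func(t *testing.T) {
        // Sequential subtests run in order, so snap is populated by the time
        // the real test PUTs it back to /v1/snapshot.
        if snap == nil {
            t.Fatal("create subtest did not populate the snapshot")
        }
    })
}
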
@@ -498,6 +498,7 @@ func TestAPI_ACLToken_List(t *testing.T) {
     defer s.Stop()

     acl := c.ACL()
+    s.WaitForSerfCheck(t)

     policies := prepTokenPolicies(t, acl)

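The remaining api package hunks repeat this setup over and over. Pulled out of any specific test, the pattern looks like this (a sketch, assuming the makeClient helper and the testutil server that these tests already use):

c, s := makeClient(t)
defer s.Stop()

// Wait until the server has registered its serfHealth check; agent and
// catalog calls issued before that point intermittently fail on CI.
s.WaitForSerfCheck(t)

agent := c.Agent()
_ = agent // registrations, checks, etc. follow in the real tests
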
@@ -42,6 +42,8 @@ func TestAPI_AgentMetrics(t *testing.T) {
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)
+
     timer := &retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}
     retry.RunWith(timer, t, func(r *retry.R) {
         metrics, err := agent.Metrics()

@@ -171,6 +173,7 @@ func TestAPI_AgentServices(t *testing.T) {
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)

     reg := &AgentServiceRegistration{
         Name: "foo",

@@ -296,6 +299,7 @@ func TestAPI_AgentServices_ManagedConnectProxyDeprecatedUpstreams(t *testing.T)
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)

     reg := &AgentServiceRegistration{
         Name: "foo",

@@ -745,6 +749,7 @@ func TestAPI_AgentSetTTLStatus(t *testing.T) {
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)

     reg := &AgentServiceRegistration{
         Name: "foo",

@@ -959,6 +964,7 @@ func TestAPI_AgentChecks_serviceBound(t *testing.T) {
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)

     // First register a service
     serviceReg := &AgentServiceRegistration{

@@ -1200,6 +1206,7 @@ func TestAPI_NodeMaintenance(t *testing.T) {
     defer s.Stop()

     agent := c.Agent()
+    s.WaitForSerfCheck(t)

     // Enable maintenance mode
     if err := agent.EnableNodeMaintenance("broken"); err != nil {

@@ -32,6 +32,7 @@ func TestAPI_CatalogNodes(t *testing.T) {
     c, s := makeClient(t)
     defer s.Stop()

+    s.WaitForSerfCheck(t)
     catalog := c.Catalog()
     retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
         nodes, meta, err := catalog.Nodes(nil)

@@ -19,6 +19,8 @@ func TestAPI_ConnectCARoots_empty(t *testing.T) {
     })
     defer s.Stop()

+    s.WaitForSerfCheck(t)
+
     connect := c.Connect()
     _, _, err := connect.CARoots(nil)

@@ -58,6 +60,7 @@ func TestAPI_ConnectCAConfig_get_set(t *testing.T) {
     c, s := makeClient(t)
     defer s.Stop()

+    s.WaitForSerfCheck(t)
     expected := &ConsulCAProviderConfig{
         RotationPeriod: 90 * 24 * time.Hour,
     }

@@ -71,6 +71,7 @@ func TestAPI_CoordinateUpdate(t *testing.T) {
     c, s := makeClient(t)
     defer s.Stop()

+    s.WaitForSerfCheck(t)
     node := "foo"
     _, err := c.Catalog().Register(&CatalogRegistration{
         Node: node,

@@ -229,6 +229,8 @@ func TestAPI_HealthChecks_NodeMetaFilter(t *testing.T) {
     agent := c.Agent()
     health := c.Health()

+    s.WaitForSerfCheck(t)
+
     // Make a service with a check
     reg := &AgentServiceRegistration{
         Name: "foo",

@@ -392,6 +394,8 @@ func TestAPI_HealthConnect(t *testing.T) {
     agent := c.Agent()
     health := c.Health()

+    s.WaitForSerfCheck(t)
+
     // Make a service with a proxy
     reg := &AgentServiceRegistration{
         Name: "foo",

@@ -15,6 +15,7 @@ func TestAPI_ClientPutGetDelete(t *testing.T) {

     kv := c.KV()

+    s.WaitForSerfCheck(t)
     // Get a get without a key
     key := testKey()
     pair, _, err := kv.Get(key, nil)

@@ -229,6 +230,7 @@ func TestAPI_ClientWatchGet(t *testing.T) {

     kv := c.KV()

+    s.WaitForSerfCheck(t)
     // Get a get without a key
     key := testKey()
     pair, meta, err := kv.Get(key, nil)

@@ -296,6 +296,7 @@ func TestAPI_SemaphoreConflict(t *testing.T) {
     c, s := makeClient(t)
     defer s.Stop()

+    s.WaitForSerfCheck(t)
     lock, session := createTestLock(t, c, "test/sema/.lock")
     defer session.Destroy(lock.opts.Session, nil)

@@ -11,6 +11,7 @@ func TestAPI_Snapshot(t *testing.T) {
     c, s := makeClient(t)
     defer s.Stop()

+    s.WaitForSerfCheck(t)
     // Place an initial key into the store.
     kv := c.KV()
     key := &KVPair{Key: testKey(), Value: []byte("hello")}

@@ -45,7 +45,7 @@ func TestLockCommand(t *testing.T) {
     a := agent.NewTestAgent(t, t.Name(), ``)
     defer a.Shutdown()

-    testrpc.WaitForLeader(t, a.RPC, "dc1")
+    testrpc.WaitForTestAgent(t, a.RPC, "dc1")

     ui := cli.NewMockUi()
     c := New(ui)

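This hunk and the two DNS hunks swap WaitForLeader for WaitForTestAgent. Read from how the helpers are used in this diff (a hedged gloss, not the helpers' documentation):

testrpc.WaitForLeader(t, a.RPC, "dc1")    // only confirms a raft leader exists
testrpc.WaitForTestAgent(t, a.RPC, "dc1") // also waits for this agent's own registration,
                                          // which is what these tests actually depend on
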