Remove duplicate constants
This patch removes the internal copies of constants in the structs package that duplicate definitions in the api package. The api.KVOp type with all of its values for the TXN endpoint and the api.HealthXXX constants are now used throughout the codebase. This introduced circular dependencies in the testutil package, which were resolved by copying the required code and constants and by moving the WaitForLeader function into a separate testrpc package.
parent 58c3b1ff38
commit 9f8f258d4d
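For illustration, a minimal sketch of what call sites look like after this change: health states come from the api package and leader-waiting from the new testrpc package. This is a hypothetical example written against the packages touched by the diff below (structs.HealthCheck, api.HealthCritical, testrpc.WaitForLeader); it is not part of the commit itself, and the test name and field values are made up.

package agent

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/consul/structs"
	"github.com/hashicorp/consul/testrpc"
)

// Hypothetical sketch: after this patch, tests call testrpc.WaitForLeader
// (formerly testutil.WaitForLeader) and use api.Health* instead of
// structs.Health* when building health checks.
func TestExample_HealthConstants(t *testing.T) {
	// In a real agent test the agent would be started first, e.g.:
	//   testrpc.WaitForLeader(t, agent.RPC, "dc1")
	// (agent setup omitted here; see the diff below for real usages.)

	check := &structs.HealthCheck{
		CheckID: "example",
		Status:  api.HealthCritical, // was structs.HealthCritical before this patch
	}
	if check.Status != api.HealthCritical {
		t.Fatalf("bad: %v", check)
	}
}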
@@ -11,7 +11,7 @@ import (
 
 rawacl "github.com/hashicorp/consul/acl"
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/consul/types"
 "github.com/hashicorp/serf/serf"
 )
@@ -53,7 +53,7 @@ func TestACL_Version8(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -79,7 +79,7 @@ func TestACL_Disabled(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -135,7 +135,7 @@ func TestACL_Special_IDs(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -190,7 +190,7 @@ func TestACL_Down_Deny(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -222,7 +222,7 @@ func TestACL_Down_Allow(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -254,7 +254,7 @@ func TestACL_Down_Extend(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -333,7 +333,7 @@ func TestACL_Cache(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -517,7 +517,7 @@ func TestACL_vetServiceRegister(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -565,7 +565,7 @@ func TestACL_vetServiceUpdate(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -603,7 +603,7 @@ func TestACL_vetCheckRegister(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -688,7 +688,7 @@ func TestACL_vetCheckUpdate(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -746,7 +746,7 @@ func TestACL_filterMembers(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -784,7 +784,7 @@ func TestACL_filterServices(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {
@@ -817,7 +817,7 @@ func TestACL_filterChecks(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 m := MockServer{catalogPolicy}
 if err := agent.InjectEndpoint("ACL", &m); err != nil {

@@ -18,6 +18,7 @@ import (
 "sync"
 "time"
 
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul"
 "github.com/hashicorp/consul/consul/state"
 "github.com/hashicorp/consul/consul/structs"
@@ -1178,7 +1179,7 @@ func (a *Agent) AddService(service *structs.NodeService, chkTypes CheckTypes, pe
 Node: a.config.NodeName,
 CheckID: types.CheckID(checkID),
 Name: fmt.Sprintf("Service '%s' check", service.Service),
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 Notes: chkType.Notes,
 ServiceID: service.ID,
 ServiceName: service.Service,
@@ -1773,7 +1774,7 @@ func (a *Agent) loadChecks(conf *Config) error {
 } else {
 // Default check to critical to avoid placing potentially unhealthy
 // services into the active pool
-p.Check.Status = structs.HealthCritical
+p.Check.Status = api.HealthCritical
 
 if err := a.AddCheck(p.Check, p.ChkType, false, p.Token); err != nil {
 // Purge the check if it is unable to be restored.
@@ -1883,7 +1884,7 @@ func (a *Agent) EnableServiceMaintenance(serviceID, reason, token string) error
 Notes: reason,
 ServiceID: service.ID,
 ServiceName: service.Service,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 a.AddCheck(check, nil, true, token)
 a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID)
@@ -1929,7 +1930,7 @@ func (a *Agent) EnableNodeMaintenance(reason, token string) {
 CheckID: structs.NodeMaint,
 Name: "Node Maintenance Mode",
 Notes: reason,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 a.AddCheck(check, nil, true, token)
 a.logger.Printf("[INFO] agent: Node entered maintenance mode")

@@ -7,6 +7,7 @@ import (
 "strconv"
 "strings"
 
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/logger"
 "github.com/hashicorp/consul/types"
@@ -291,7 +292,7 @@ func (s *HTTPServer) AgentCheckPass(resp http.ResponseWriter, req *http.Request)
 return nil, err
 }
 
-if err := s.agent.updateTTLCheck(checkID, structs.HealthPassing, note); err != nil {
+if err := s.agent.updateTTLCheck(checkID, api.HealthPassing, note); err != nil {
 return nil, err
 }
 s.syncChanges()
@@ -309,7 +310,7 @@ func (s *HTTPServer) AgentCheckWarn(resp http.ResponseWriter, req *http.Request)
 return nil, err
 }
 
-if err := s.agent.updateTTLCheck(checkID, structs.HealthWarning, note); err != nil {
+if err := s.agent.updateTTLCheck(checkID, api.HealthWarning, note); err != nil {
 return nil, err
 }
 s.syncChanges()
@@ -327,7 +328,7 @@ func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request)
 return nil, err
 }
 
-if err := s.agent.updateTTLCheck(checkID, structs.HealthCritical, note); err != nil {
+if err := s.agent.updateTTLCheck(checkID, api.HealthCritical, note); err != nil {
 return nil, err
 }
 s.syncChanges()
@@ -336,7 +337,7 @@ func (s *HTTPServer) AgentCheckFail(resp http.ResponseWriter, req *http.Request)
 
 // checkUpdate is the payload for a PUT to AgentCheckUpdate.
 type checkUpdate struct {
-// Status us one of the structs.Health* states, "passing", "warning", or
+// Status us one of the api.Health* states, "passing", "warning", or
 // "critical".
 Status string
 
@@ -363,9 +364,9 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques
 }
 
 switch update.Status {
-case structs.HealthPassing:
-case structs.HealthWarning:
-case structs.HealthCritical:
+case api.HealthPassing:
+case api.HealthWarning:
+case api.HealthCritical:
 default:
 resp.WriteHeader(400)
 fmt.Fprintf(resp, "Invalid check status: '%s'", update.Status)

@@ -15,6 +15,7 @@ import (
 "testing"
 "time"
 
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/command/base"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/logger"
@@ -130,7 +131,7 @@ func TestAgent_Checks(t *testing.T) {
 Node: srv.agent.config.NodeName,
 CheckID: "mysql",
 Name: "mysql",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 srv.agent.state.AddCheck(chk1, "")
 
@@ -147,7 +148,7 @@ func TestAgent_Checks(t *testing.T) {
 if len(val) != 1 {
 t.Fatalf("bad checks: %v", obj)
 }
-if val["mysql"].Status != structs.HealthPassing {
+if val["mysql"].Status != api.HealthPassing {
 t.Fatalf("bad check: %v", obj)
 }
 }
@@ -162,7 +163,7 @@ func TestAgent_Checks_ACLFilter(t *testing.T) {
 Node: srv.agent.config.NodeName,
 CheckID: "mysql",
 Name: "mysql",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 srv.agent.state.AddCheck(chk1, "")
 
@@ -862,7 +863,7 @@ func TestAgent_RegisterCheck(t *testing.T) {
 
 // By default, checks start in critical state.
 state := srv.agent.state.Checks()[checkID]
-if state.Status != structs.HealthCritical {
+if state.Status != api.HealthCritical {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -883,7 +884,7 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) {
 CheckType: CheckType{
 TTL: 15 * time.Second,
 },
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 req.Body = encodeReq(args)
 
@@ -906,7 +907,7 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) {
 }
 
 state := srv.agent.state.Checks()[checkID]
-if state.Status != structs.HealthPassing {
+if state.Status != api.HealthPassing {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -1065,7 +1066,7 @@ func TestAgent_PassCheck(t *testing.T) {
 
 // Ensure we have a check mapping
 state := srv.agent.state.Checks()["test"]
-if state.Status != structs.HealthPassing {
+if state.Status != api.HealthPassing {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -1130,7 +1131,7 @@ func TestAgent_WarnCheck(t *testing.T) {
 
 // Ensure we have a check mapping
 state := srv.agent.state.Checks()["test"]
-if state.Status != structs.HealthWarning {
+if state.Status != api.HealthWarning {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -1195,7 +1196,7 @@ func TestAgent_FailCheck(t *testing.T) {
 
 // Ensure we have a check mapping
 state := srv.agent.state.Checks()["test"]
-if state.Status != structs.HealthCritical {
+if state.Status != api.HealthCritical {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -1246,9 +1247,9 @@ func TestAgent_UpdateCheck(t *testing.T) {
 }
 
 cases := []checkUpdate{
-checkUpdate{structs.HealthPassing, "hello-passing"},
-checkUpdate{structs.HealthCritical, "hello-critical"},
-checkUpdate{structs.HealthWarning, "hello-warning"},
+checkUpdate{api.HealthPassing, "hello-passing"},
+checkUpdate{api.HealthCritical, "hello-critical"},
+checkUpdate{api.HealthWarning, "hello-warning"},
 }
 
 for _, c := range cases {
@@ -1284,7 +1285,7 @@ func TestAgent_UpdateCheck(t *testing.T) {
 }
 
 update := checkUpdate{
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 Output: strings.Repeat("-= bad -=", 5*CheckBufSize),
 }
 req.Body = encodeReq(update)
@@ -1305,7 +1306,7 @@ func TestAgent_UpdateCheck(t *testing.T) {
 // rough check that the output buffer was cut down so this test
 // isn't super brittle.
 state := srv.agent.state.Checks()["test"]
-if state.Status != structs.HealthPassing || len(state.Output) > 2*CheckBufSize {
+if state.Status != api.HealthPassing || len(state.Output) > 2*CheckBufSize {
 t.Fatalf("bad: %v", state)
 }
 }
@@ -1343,7 +1344,7 @@ func TestAgent_UpdateCheck(t *testing.T) {
 }
 
 update := checkUpdate{
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 req.Body = encodeReq(update)
 
@@ -1378,7 +1379,7 @@ func TestAgent_UpdateCheck_ACLDeny(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-req.Body = encodeReq(checkUpdate{structs.HealthPassing, "hello-passing"})
+req.Body = encodeReq(checkUpdate{api.HealthPassing, "hello-passing"})
 _, err = srv.AgentCheckUpdate(nil, req)
 if err == nil || !strings.Contains(err.Error(), permissionDenied) {
 t.Fatalf("err: %v", err)
@@ -1389,7 +1390,7 @@ func TestAgent_UpdateCheck_ACLDeny(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-req.Body = encodeReq(checkUpdate{structs.HealthPassing, "hello-passing"})
+req.Body = encodeReq(checkUpdate{api.HealthPassing, "hello-passing"})
 _, err = srv.AgentCheckUpdate(nil, req)
 if err != nil {
 t.Fatalf("err: %v", err)

@@ -15,10 +15,11 @@ import (
 "testing"
 "time"
 
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/logger"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/consul/types"
 "github.com/hashicorp/consul/version"
 "github.com/hashicorp/go-uuid"
@@ -626,7 +627,7 @@ func TestAgent_AddCheck(t *testing.T) {
 Node: "foo",
 CheckID: "mem",
 Name: "memory util",
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 chk := &CheckType{
 Script: "exit 0",
@@ -644,7 +645,7 @@ func TestAgent_AddCheck(t *testing.T) {
 }
 
 // Ensure our check is in the right state
-if sChk.Status != structs.HealthCritical {
+if sChk.Status != api.HealthCritical {
 t.Fatalf("check not critical")
 }
 
@@ -663,7 +664,7 @@ func TestAgent_AddCheck_StartPassing(t *testing.T) {
 Node: "foo",
 CheckID: "mem",
 Name: "memory util",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 chk := &CheckType{
 Script: "exit 0",
@@ -681,7 +682,7 @@ func TestAgent_AddCheck_StartPassing(t *testing.T) {
 }
 
 // Ensure our check is in the right state
-if sChk.Status != structs.HealthPassing {
+if sChk.Status != api.HealthPassing {
 t.Fatalf("check not passing")
 }
 
@@ -700,7 +701,7 @@ func TestAgent_AddCheck_MinInterval(t *testing.T) {
 Node: "foo",
 CheckID: "mem",
 Name: "memory util",
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 chk := &CheckType{
 Script: "exit 0",
@@ -755,7 +756,7 @@ func TestAgent_AddCheck_RestoreState(t *testing.T) {
 CheckID: "baz",
 TTL: time.Minute,
 }
-err := agent.persistCheckState(ttl, structs.HealthPassing, "yup")
+err := agent.persistCheckState(ttl, api.HealthPassing, "yup")
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -780,7 +781,7 @@ func TestAgent_AddCheck_RestoreState(t *testing.T) {
 if !ok {
 t.Fatalf("missing check")
 }
-if check.Status != structs.HealthPassing {
+if check.Status != api.HealthPassing {
 t.Fatalf("bad: %#v", check)
 }
 if check.Output != "yup" {
@@ -807,7 +808,7 @@ func TestAgent_RemoveCheck(t *testing.T) {
 Node: "foo",
 CheckID: "mem",
 Name: "memory util",
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 chk := &CheckType{
 Script: "exit 0",
@@ -843,7 +844,7 @@ func TestAgent_updateTTLCheck(t *testing.T) {
 Node: "foo",
 CheckID: "mem",
 Name: "memory util",
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 chk := &CheckType{
 TTL: 15 * time.Second,
@@ -854,13 +855,13 @@ func TestAgent_updateTTLCheck(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-if err := agent.updateTTLCheck("mem", structs.HealthPassing, "foo"); err != nil {
+if err := agent.updateTTLCheck("mem", api.HealthPassing, "foo"); err != nil {
 t.Fatalf("err: %v", err)
 }
 
 // Ensure we have a check mapping.
 status := agent.state.Checks()["mem"]
-if status.Status != structs.HealthPassing {
+if status.Status != api.HealthPassing {
 t.Fatalf("bad: %v", status)
 }
 if status.Output != "foo" {
@@ -873,7 +874,7 @@ func TestAgent_ConsulService(t *testing.T) {
 defer os.RemoveAll(dir)
 defer agent.Shutdown()
 
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
 
 // Consul service is registered
 services := agent.state.Services()
@@ -1124,7 +1125,7 @@ func TestAgent_PersistCheck(t *testing.T) {
 Node: config.NodeName,
 CheckID: "mem",
 Name: "memory check",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 chkType := &CheckType{
 Script: "/bin/true",
@@ -1197,7 +1198,7 @@ func TestAgent_PersistCheck(t *testing.T) {
 if !ok {
 t.Fatalf("bad: %#v", agent2.state.checks)
 }
-if result.Status != structs.HealthCritical {
+if result.Status != api.HealthCritical {
 t.Fatalf("bad: %#v", result)
 }
 if result.Name != "mem1" {
@@ -1223,7 +1224,7 @@ func TestAgent_PurgeCheck(t *testing.T) {
 Node: config.NodeName,
 CheckID: "mem",
 Name: "memory check",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 
 file := filepath.Join(agent.config.DataDir, checksDir, checkIDHash(check.CheckID))
@@ -1259,7 +1260,7 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) {
 Node: config.NodeName,
 CheckID: "mem",
 Name: "memory check",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 }
 
 // First persist the check
@@ -1345,7 +1346,7 @@ func TestAgent_unloadChecks(t *testing.T) {
 Node: config.NodeName,
 CheckID: "service:redis",
 Name: "redischeck",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 ServiceID: "redis",
 ServiceName: "redis",
 }
@@ -1526,7 +1527,7 @@ func TestAgent_Service_Reap(t *testing.T) {
 }
 chkTypes := CheckTypes{
 &CheckType{
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 TTL: 10 * time.Millisecond,
 DeregisterCriticalServiceAfter: 100 * time.Millisecond,
 },
@@ -1555,7 +1556,7 @@ func TestAgent_Service_Reap(t *testing.T) {
 }
 
 // Pass the TTL.
-if err := agent.updateTTLCheck("service:redis", structs.HealthPassing, "foo"); err != nil {
+if err := agent.updateTTLCheck("service:redis", api.HealthPassing, "foo"); err != nil {
 t.Fatalf("err: %v", err)
 }
 if _, ok := agent.state.Services()["redis"]; !ok {
@@ -1600,7 +1601,7 @@ func TestAgent_Service_NoReap(t *testing.T) {
 }
 chkTypes := CheckTypes{
 &CheckType{
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 TTL: 10 * time.Millisecond,
 },
 }
@@ -1659,7 +1660,7 @@ func TestAgent_addCheck_restoresSnapshot(t *testing.T) {
 Node: config.NodeName,
 CheckID: "service:redis",
 Name: "redischeck",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 ServiceID: "redis",
 ServiceName: "redis",
 }
@@ -1676,7 +1677,7 @@ func TestAgent_addCheck_restoresSnapshot(t *testing.T) {
 if !ok {
 t.Fatalf("missing check")
 }
-if check.Status != structs.HealthPassing {
+if check.Status != api.HealthPassing {
 t.Fatalf("bad: %s", check.Status)
 }
 }
@@ -1749,7 +1750,7 @@ func TestAgent_checkStateSnapshot(t *testing.T) {
 Node: config.NodeName,
 CheckID: "service:redis",
 Name: "redischeck",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 ServiceID: "redis",
 ServiceName: "redis",
 }
@@ -1780,7 +1781,7 @@ func TestAgent_checkStateSnapshot(t *testing.T) {
 }
 
 // Make sure state was restored
-if out.Status != structs.HealthPassing {
+if out.Status != api.HealthPassing {
 t.Fatalf("should have restored check state")
 }
 }
@@ -1796,7 +1797,7 @@ func TestAgent_loadChecks_checkFails(t *testing.T) {
 Node: config.NodeName,
 CheckID: "service:redis",
 Name: "redischeck",
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 ServiceID: "nope",
 }
 if err := agent.persistCheck(check, nil); err != nil {
@@ -1834,7 +1835,7 @@ func TestAgent_persistCheckState(t *testing.T) {
 }
 
 // Persist some check state for the check
-err := agent.persistCheckState(check, structs.HealthCritical, "nope")
+err := agent.persistCheckState(check, api.HealthCritical, "nope")
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -1859,7 +1860,7 @@ func TestAgent_persistCheckState(t *testing.T) {
 if p.Output != "nope" {
 t.Fatalf("bad: %#v", p)
 }
-if p.Status != structs.HealthCritical {
+if p.Status != api.HealthCritical {
 t.Fatalf("bad: %#v", p)
 }
 
@@ -1882,7 +1883,7 @@ func TestAgent_loadCheckState(t *testing.T) {
 }
 
 // Persist the check state
-err := agent.persistCheckState(check, structs.HealthPassing, "yup")
+err := agent.persistCheckState(check, api.HealthPassing, "yup")
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -1890,14 +1891,14 @@ func TestAgent_loadCheckState(t *testing.T) {
 // Try to load the state
 health := &structs.HealthCheck{
 CheckID: "check1",
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 }
 if err := agent.loadCheckState(health); err != nil {
 t.Fatalf("err: %s", err)
 }
 
 // Should not have restored the status due to expiration
-if health.Status != structs.HealthCritical {
+if health.Status != api.HealthCritical {
 t.Fatalf("bad: %#v", health)
 }
 if health.Output != "" {
@@ -1912,7 +1913,7 @@ func TestAgent_loadCheckState(t *testing.T) {
 
 // Set a TTL which will not expire before we check it
 check.TTL = time.Minute
-err = agent.persistCheckState(check, structs.HealthPassing, "yup")
+err = agent.persistCheckState(check, api.HealthPassing, "yup")
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -1923,7 +1924,7 @@ func TestAgent_loadCheckState(t *testing.T) {
 }
 
 // Should have restored
-if health.Status != structs.HealthPassing {
+if health.Status != api.HealthPassing {
 t.Fatalf("bad: %#v", health)
 }
 if health.Output != "yup" {
@@ -1947,7 +1948,7 @@ func TestAgent_purgeCheckState(t *testing.T) {
 CheckID: "check1",
 TTL: time.Minute,
 }
-err := agent.persistCheckState(check, structs.HealthPassing, "yup")
+err := agent.persistCheckState(check, api.HealthPassing, "yup")
 if err != nil {
 t.Fatalf("err: %s", err)
 }

@@ -9,7 +9,7 @@ import (
 "time"
 
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/serf/coordinate"
 )
 
@@ -19,7 +19,7 @@ func TestCatalogRegister(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 req, err := http.NewRequest("GET", "/v1/catalog/register", nil)
@@ -60,7 +60,7 @@ func TestCatalogDeregister(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 req, err := http.NewRequest("GET", "/v1/catalog/deregister", nil)
@@ -89,7 +89,7 @@ func TestCatalogDatacenters(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
 obj, err := srv.CatalogDatacenters(nil, nil)
 if err != nil {
 return false, err
@@ -111,7 +111,7 @@ func TestCatalogNodes(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -151,7 +151,7 @@ func TestCatalogNodes_MetaFilter(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a meta field
 args := &structs.RegisterRequest{
@@ -201,7 +201,7 @@ func TestCatalogNodes_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir1)
 defer srv1.Shutdown()
 defer srv1.agent.Shutdown()
-testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
 
 dir2, srv2 := makeHTTPServerWithConfig(t,
 func(c *Config) {
@@ -211,7 +211,7 @@ func TestCatalogNodes_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir2)
 defer srv2.Shutdown()
 defer srv2.agent.Shutdown()
-testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
+testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
 
 // Wait for the WAN join.
 addr := fmt.Sprintf("127.0.0.1:%d",
@@ -219,7 +219,7 @@ func TestCatalogNodes_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
 return len(srv1.agent.WANMembers()) > 1, nil
 }); err != nil {
 t.Fatalf("Failed waiting for WAN join: %v", err)
@@ -302,7 +302,7 @@ func TestCatalogNodes_Blocking(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.DCSpecificRequest{
@@ -363,7 +363,7 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register nodes.
 args := &structs.RegisterRequest{
@@ -458,7 +458,7 @@ func TestCatalogServices(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -500,7 +500,7 @@ func TestCatalogServices_NodeMetaFilter(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -548,7 +548,7 @@ func TestCatalogServiceNodes(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Make sure an empty list is returned, not a nil
 {
@@ -612,7 +612,7 @@ func TestCatalogServiceNodes_NodeMetaFilter(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Make sure an empty list is returned, not a nil
 {
@@ -681,7 +681,7 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir1)
 defer srv1.Shutdown()
 defer srv1.agent.Shutdown()
-testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
 
 dir2, srv2 := makeHTTPServerWithConfig(t,
 func(c *Config) {
@@ -691,7 +691,7 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir2)
 defer srv2.Shutdown()
 defer srv2.agent.Shutdown()
-testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
+testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
 
 // Wait for the WAN join.
 addr := fmt.Sprintf("127.0.0.1:%d",
@@ -699,7 +699,7 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
 return len(srv1.agent.WANMembers()) > 1, nil
 }); err != nil {
 t.Fatalf("Failed waiting for WAN join: %v", err)
@@ -773,7 +773,7 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register nodes.
 args := &structs.RegisterRequest{
@@ -875,7 +875,7 @@ func TestCatalogNodeServices(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -920,7 +920,7 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir1)
 defer srv1.Shutdown()
 defer srv1.agent.Shutdown()
-testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
 
 dir2, srv2 := makeHTTPServerWithConfig(t,
 func(c *Config) {
@@ -930,7 +930,7 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) {
 defer os.RemoveAll(dir2)
 defer srv2.Shutdown()
 defer srv2.agent.Shutdown()
-testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
+testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
 
 // Wait for the WAN join.
 addr := fmt.Sprintf("127.0.0.1:%d",
@@ -938,7 +938,7 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
 return len(srv1.agent.WANMembers()) > 1, nil
 }); err != nil {
 t.Fatalf("Failed waiting for WAN join: %v", err)

@@ -15,6 +15,7 @@ import (
 
 "github.com/armon/circbuf"
 docker "github.com/fsouza/go-dockerclient"
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/lib"
 "github.com/hashicorp/consul/types"
@@ -159,7 +160,7 @@ func (c *CheckMonitor) check() {
 cmd, err := ExecScript(c.Script)
 if err != nil {
 c.Logger.Printf("[ERR] agent: failed to setup invoke '%s': %s", c.Script, err)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 return
 }
 
@@ -171,7 +172,7 @@ func (c *CheckMonitor) check() {
 // Start the check
 if err := cmd.Start(); err != nil {
 c.Logger.Printf("[ERR] agent: failed to invoke '%s': %s", c.Script, err)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 return
 }
 
@@ -203,7 +204,7 @@ func (c *CheckMonitor) check() {
 // Check if the check passed
 if err == nil {
 c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthPassing, outputStr)
 return
 }
 
@@ -214,7 +215,7 @@ func (c *CheckMonitor) check() {
 code := status.ExitStatus()
 if code == 1 {
 c.Logger.Printf("[WARN] agent: Check '%v' is now warning", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthWarning, outputStr)
 return
 }
 }
@@ -222,7 +223,7 @@ func (c *CheckMonitor) check() {
 
 // Set the health as critical
 c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, outputStr)
 }
 
 // CheckTTL is used to apply a TTL to check status,
@@ -273,7 +274,7 @@ func (c *CheckTTL) run() {
 case <-c.timer.C:
 c.Logger.Printf("[WARN] agent: Check '%v' missed TTL, is now critical",
 c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, c.getExpiredOutput())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, c.getExpiredOutput())
 
 case <-c.stopCh:
 return
@@ -423,7 +424,7 @@ func (c *CheckHTTP) check() {
 req, err := http.NewRequest("GET", c.HTTP, nil)
 if err != nil {
 c.Logger.Printf("[WARN] agent: http request failed '%s': %s", c.HTTP, err)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 return
 }
 
@@ -433,7 +434,7 @@ func (c *CheckHTTP) check() {
 resp, err := c.httpClient.Do(req)
 if err != nil {
 c.Logger.Printf("[WARN] agent: http request failed '%s': %s", c.HTTP, err)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 return
 }
 defer resp.Body.Close()
@@ -450,19 +451,19 @@ func (c *CheckHTTP) check() {
 if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
 // PASSING (2xx)
 c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, result)
+c.Notify.UpdateCheck(c.CheckID, api.HealthPassing, result)
 
 } else if resp.StatusCode == 429 {
 // WARNING
 // 429 Too Many Requests (RFC 6585)
 // The user has sent too many requests in a given amount of time.
 c.Logger.Printf("[WARN] agent: Check '%v' is now warning", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, result)
+c.Notify.UpdateCheck(c.CheckID, api.HealthWarning, result)
 
 } else {
 // CRITICAL
 c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, result)
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, result)
 }
 }
 
@@ -541,12 +542,12 @@ func (c *CheckTCP) check() {
 conn, err := c.dialer.Dial(`tcp`, c.TCP)
 if err != nil {
 c.Logger.Printf("[WARN] agent: socket connection failed '%s': %s", c.TCP, err)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 return
 }
 conn.Close()
 c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, fmt.Sprintf("TCP connect %s: Success", c.TCP))
+c.Notify.UpdateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("TCP connect %s: Success", c.TCP))
 }
 
 // A custom interface since go-dockerclient doesn't have one
@@ -650,7 +651,7 @@ func (c *CheckDocker) check() {
 )
 if exec, err = c.dockerClient.CreateExec(execOpts); err != nil {
 c.Logger.Printf("[DEBUG] agent: Error while creating Exec: %s", err.Error())
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to create Exec, error: %s", err.Error()))
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, fmt.Sprintf("Unable to create Exec, error: %s", err.Error()))
 return
 }
 
@@ -660,7 +661,7 @@ func (c *CheckDocker) check() {
 err = c.dockerClient.StartExec(exec.ID, docker.StartExecOptions{Detach: false, Tty: false, OutputStream: output, ErrorStream: output})
 if err != nil {
 c.Logger.Printf("[DEBUG] Error in executing health checks: %s", err.Error())
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to start Exec: %s", err.Error()))
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, fmt.Sprintf("Unable to start Exec: %s", err.Error()))
 return
 }
 
@@ -677,26 +678,26 @@ func (c *CheckDocker) check() {
 execInfo, err := c.dockerClient.InspectExec(exec.ID)
 if err != nil {
 c.Logger.Printf("[DEBUG] Error in inspecting check result : %s", err.Error())
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to inspect Exec: %s", err.Error()))
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, fmt.Sprintf("Unable to inspect Exec: %s", err.Error()))
 return
 }
 
 // Sets the status of the check to healthy if exit code is 0
 if execInfo.ExitCode == 0 {
-c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthPassing, outputStr)
 return
 }
 
 // Set the status of the check to Warning if exit code is 1
 if execInfo.ExitCode == 1 {
 c.Logger.Printf("[DEBUG] Check failed with exit code: %d", execInfo.ExitCode)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthWarning, outputStr)
 return
 }
 
 // Set the health as critical
 c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
-c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, outputStr)
+c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, outputStr)
 }
 
 func shell() string {

@@ -16,7 +16,7 @@ import (
 "time"
 
 docker "github.com/fsouza/go-dockerclient"
-"github.com/hashicorp/consul/consul/structs"
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/testutil"
 "github.com/hashicorp/consul/types"
 )
@@ -96,19 +96,19 @@ func expectStatus(t *testing.T, script, status string) {
 }
 
 func TestCheckMonitor_Passing(t *testing.T) {
-expectStatus(t, "exit 0", structs.HealthPassing)
+expectStatus(t, "exit 0", api.HealthPassing)
 }
 
 func TestCheckMonitor_Warning(t *testing.T) {
-expectStatus(t, "exit 1", structs.HealthWarning)
+expectStatus(t, "exit 1", api.HealthWarning)
 }
 
 func TestCheckMonitor_Critical(t *testing.T) {
-expectStatus(t, "exit 2", structs.HealthCritical)
+expectStatus(t, "exit 2", api.HealthCritical)
 }
 
 func TestCheckMonitor_BadCmd(t *testing.T) {
-expectStatus(t, "foobarbaz", structs.HealthCritical)
+expectStatus(t, "foobarbaz", api.HealthCritical)
 }
 
 func TestCheckMonitor_Timeout(t *testing.T) {
@@ -163,8 +163,8 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
 t.Fatalf("should have 1 or more updates %v", mock.updates)
 }
 
-if mock.State("foo") != structs.HealthPassing {
-t.Fatalf("should be %v %v", structs.HealthPassing, mock.state)
+if mock.State("foo") != api.HealthPassing {
+t.Fatalf("should be %v %v", api.HealthPassing, mock.state)
 }
 }
 
@@ -208,13 +208,13 @@ func TestCheckTTL(t *testing.T) {
 defer check.Stop()
 
 time.Sleep(50 * time.Millisecond)
-check.SetStatus(structs.HealthPassing, "test-output")
+check.SetStatus(api.HealthPassing, "test-output")
 
 if mock.Updates("foo") != 1 {
 t.Fatalf("should have 1 updates %v", mock.updates)
 }
 
-if mock.State("foo") != structs.HealthPassing {
+if mock.State("foo") != api.HealthPassing {
 t.Fatalf("should be passing %v", mock.state)
 }
 
@@ -231,7 +231,7 @@ func TestCheckTTL(t *testing.T) {
 t.Fatalf("should have 2 updates %v", mock.updates)
 }
 
-if mock.State("foo") != structs.HealthCritical {
+if mock.State("foo") != api.HealthCritical {
 t.Fatalf("should be critical %v", mock.state)
 }
 
@@ -307,25 +307,25 @@ func TestCheckHTTPCritical(t *testing.T) {
 
 server := mockHTTPServer(150)
 fmt.Println(server.URL)
-expectHTTPStatus(t, server.URL, structs.HealthCritical)
+expectHTTPStatus(t, server.URL, api.HealthCritical)
 server.Close()
 
 // 2xx - 1
 server = mockHTTPServer(199)
-expectHTTPStatus(t, server.URL, structs.HealthCritical)
+expectHTTPStatus(t, server.URL, api.HealthCritical)
 server.Close()
 
 // 2xx + 1
 server = mockHTTPServer(300)
-expectHTTPStatus(t, server.URL, structs.HealthCritical)
+expectHTTPStatus(t, server.URL, api.HealthCritical)
 server.Close()
 
 server = mockHTTPServer(400)
-expectHTTPStatus(t, server.URL, structs.HealthCritical)
+expectHTTPStatus(t, server.URL, api.HealthCritical)
 server.Close()
 
 server = mockHTTPServer(500)
-expectHTTPStatus(t, server.URL, structs.HealthCritical)
+expectHTTPStatus(t, server.URL, api.HealthCritical)
 server.Close()
 }
 
@@ -333,25 +333,25 @@ func TestCheckHTTPPassing(t *testing.T) {
 var server *httptest.Server
 
 server = mockHTTPServer(200)
-expectHTTPStatus(t, server.URL, structs.HealthPassing)
+expectHTTPStatus(t, server.URL, api.HealthPassing)
 server.Close()
 
 server = mockHTTPServer(201)
-expectHTTPStatus(t, server.URL, structs.HealthPassing)
+expectHTTPStatus(t, server.URL, api.HealthPassing)
 server.Close()
 
 server = mockHTTPServer(250)
-expectHTTPStatus(t, server.URL, structs.HealthPassing)
+expectHTTPStatus(t, server.URL, api.HealthPassing)
 server.Close()
 
 server = mockHTTPServer(299)
-expectHTTPStatus(t, server.URL, structs.HealthPassing)
+expectHTTPStatus(t, server.URL, api.HealthPassing)
 server.Close()
 }
 
 func TestCheckHTTPWarning(t *testing.T) {
 server := mockHTTPServer(429)
-expectHTTPStatus(t, server.URL, structs.HealthWarning)
+expectHTTPStatus(t, server.URL, api.HealthWarning)
 server.Close()
 }
 
@@ -394,7 +394,7 @@ func TestCheckHTTPTimeout(t *testing.T) {
 return false, fmt.Errorf("should have at least 2 updates %v", mock.updates)
 }
 
-if mock.state["bar"] != structs.HealthCritical {
+if mock.state["bar"] != api.HealthCritical {
 return false, fmt.Errorf("should be critical %v", mock.state)
 }
 return true, nil
@@ -462,7 +462,7 @@ func TestCheckHTTP_TLSSkipVerify_true_pass(t *testing.T) {
 }
 
 if err := testutil.WaitForResult(func() (bool, error) {
-if mock.state["skipverify_true"] != structs.HealthPassing {
+if mock.state["skipverify_true"] != api.HealthPassing {
 return false, fmt.Errorf("should be passing %v", mock.state)
 }
 return true, nil
@@ -497,7 +497,7 @@ func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) {
 }
 
 if err := testutil.WaitForResult(func() (bool, error) {
-if mock.state["skipverify_true"] != structs.HealthCritical {
+if mock.state["skipverify_true"] != api.HealthCritical {
 return false, fmt.Errorf("should be critical %v", mock.state)
 }
 return true, nil
@@ -534,7 +534,7 @@ func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) {
 
 if err := testutil.WaitForResult(func() (bool, error) {
 // This should fail due to an invalid SSL cert
-if mock.state["skipverify_false"] != structs.HealthCritical {
+if mock.state["skipverify_false"] != api.HealthCritical {
 return false, fmt.Errorf("should be critical %v", mock.state)
 }
 
@@ -603,7 +603,7 @@ func TestCheckTCPCritical(t *testing.T) {
 )
 
 tcpServer = mockTCPServer(`tcp`)
-expectTCPStatus(t, `127.0.0.1:0`, structs.HealthCritical)
+expectTCPStatus(t, `127.0.0.1:0`, api.HealthCritical)
 tcpServer.Close()
 }
 
@@ -613,11 +613,11 @@ func TestCheckTCPPassing(t *testing.T) {
 )
 
 tcpServer = mockTCPServer(`tcp`)
-expectTCPStatus(t, tcpServer.Addr().String(), structs.HealthPassing)
+expectTCPStatus(t, tcpServer.Addr().String(), api.HealthPassing)
 tcpServer.Close()
 
 tcpServer = mockTCPServer(`tcp6`)
-expectTCPStatus(t, tcpServer.Addr().String(), structs.HealthPassing)
+expectTCPStatus(t, tcpServer.Addr().String(), api.HealthPassing)
 tcpServer.Close()
 }
 
@@ -785,27 +785,27 @@ func expectDockerCheckStatus(t *testing.T, dockerClient DockerClient, status str
 }
 
 func TestDockerCheckWhenExecReturnsSuccessExitCode(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithNoErrors{}, structs.HealthPassing, "output")
+expectDockerCheckStatus(t, &fakeDockerClientWithNoErrors{}, api.HealthPassing, "output")
 }
 
 func TestDockerCheckWhenExecCreationFails(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithCreateExecFailure{}, structs.HealthCritical, "Unable to create Exec, error: Exec Creation Failed")
+expectDockerCheckStatus(t, &fakeDockerClientWithCreateExecFailure{}, api.HealthCritical, "Unable to create Exec, error: Exec Creation Failed")
 }
 
 func TestDockerCheckWhenExitCodeIsNonZero(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithExecNonZeroExitCode{}, structs.HealthCritical, "")
+expectDockerCheckStatus(t, &fakeDockerClientWithExecNonZeroExitCode{}, api.HealthCritical, "")
 }
 
 func TestDockerCheckWhenExitCodeIsone(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithExecExitCodeOne{}, structs.HealthWarning, "output")
+expectDockerCheckStatus(t, &fakeDockerClientWithExecExitCodeOne{}, api.HealthWarning, "output")
 }
 
 func TestDockerCheckWhenExecStartFails(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithStartExecFailure{}, structs.HealthCritical, "Unable to start Exec: Couldn't Start Exec")
+expectDockerCheckStatus(t, &fakeDockerClientWithStartExecFailure{}, api.HealthCritical, "Unable to start Exec: Couldn't Start Exec")
 }
 
 func TestDockerCheckWhenExecInfoFails(t *testing.T) {
-expectDockerCheckStatus(t, &fakeDockerClientWithExecInfoErrors{}, structs.HealthCritical, "Unable to inspect Exec: Unable to query exec info")
+expectDockerCheckStatus(t, &fakeDockerClientWithExecInfoErrors{}, api.HealthCritical, "Unable to inspect Exec: Unable to query exec info")
 }
 
 func TestDockerCheckDefaultToSh(t *testing.T) {

@@ -8,7 +8,7 @@ import (
 "time"
 
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/serf/coordinate"
 )
 
@@ -18,7 +18,7 @@ func TestCoordinate_Datacenters(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 req, err := http.NewRequest("GET", "/v1/coordinate/datacenters", nil)
 if err != nil {
@@ -46,7 +46,7 @@ func TestCoordinate_Nodes(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Make sure an empty list is non-nil.
 req, err := http.NewRequest("GET", "/v1/coordinate/nodes?dc=dc1", nil)

@@ -10,9 +10,10 @@ import (
 "testing"
 "time"
 
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/lib"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/miekg/dns"
 )
 
@@ -144,7 +145,7 @@ func TestDNS_NodeLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -239,7 +240,7 @@ func TestDNS_CaseInsensitiveNodeLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -273,7 +274,7 @@ func TestDNS_NodeLookup_PeriodName(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node with period in name
 args := &structs.RegisterRequest{
@@ -315,7 +316,7 @@ func TestDNS_NodeLookup_AAAA(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -368,7 +369,7 @@ func TestDNS_NodeLookup_CNAME(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -414,7 +415,7 @@ func TestDNS_ReverseLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -457,7 +458,7 @@ func TestDNS_ReverseLookup_CustomDomain(t *testing.T) {
 defer srv.agent.Shutdown()
 srv.domain = dns.Fqdn("custom")
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -499,7 +500,7 @@ func TestDNS_ReverseLookup_IPV6(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -541,7 +542,7 @@ func TestDNS_ServiceLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service.
 {
@@ -664,7 +665,7 @@ func TestDNS_ExternalServiceLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with an external service.
 {
@@ -740,7 +741,7 @@ func TestDNS_ExternalServiceToConsulCNAMELookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register the initial node with a service
 {
@@ -851,7 +852,7 @@ func TestDNS_ExternalServiceToConsulCNAMENestedLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register the initial node with a service
 {
@@ -993,7 +994,7 @@ func TestDNS_ServiceLookup_ServiceAddress_A(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service.
 {
@@ -1088,7 +1089,7 @@ func TestDNS_ServiceLookup_ServiceAddress_CNAME(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service whose address isn't an IP.
 {
@@ -1183,7 +1184,7 @@ func TestDNS_ServiceLookup_ServiceAddressIPV6(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service.
 {
@@ -1289,8 +1290,8 @@ func TestDNS_ServiceLookup_WanAddress(t *testing.T) {
 defer os.RemoveAll(dir2)
 defer srv2.Shutdown()
 
-testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
-testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
+testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
 
 // Join WAN cluster
 addr := fmt.Sprintf("127.0.0.1:%d",
@@ -1299,7 +1300,7 @@ func TestDNS_ServiceLookup_WanAddress(t *testing.T) {
 t.Fatalf("err: %v", err)
 }
 
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
 return len(srv1.agent.WANMembers()) > 1, nil
 }); err != nil {
 t.Fatalf("Failed waiting for WAN join: %v", err)
@@ -1465,7 +1466,7 @@ func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service.
 {
@@ -1538,7 +1539,7 @@ func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register node
 args := &structs.RegisterRequest{
@@ -1599,7 +1600,7 @@ func TestDNS_ServiceLookup_PreparedQueryNamePeriod(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a node with a service.
 {
@@ -1680,7 +1681,7 @@ func TestDNS_ServiceLookup_Dedup(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a single node with multiple instances of a service.
 {
@@ -1785,7 +1786,7 @@ func TestDNS_ServiceLookup_Dedup_SRV(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register a single node with multiple instances of a service.
 {
@@ -2033,7 +2034,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
 defer os.RemoveAll(dir)
 defer srv.agent.Shutdown()
 
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
 
 // Register nodes with health checks in various states.
 {
@@ -2049,7 +2050,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
 Check: &structs.HealthCheck{
 CheckID: "serf",
 Name: "serf",
-Status: structs.HealthCritical,
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2070,7 +2071,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
Check: &structs.HealthCheck{
|
||||
CheckID: "serf",
|
||||
Name: "serf",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
if err := srv.agent.RPC("Catalog.Register", args2, &out); err != nil {
|
||||
|
@ -2090,7 +2091,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
|
||||
|
@ -2124,7 +2125,7 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
},
|
||||
}
|
||||
if err := srv.agent.RPC("Catalog.Register", args5, &out); err != nil {
|
||||
|
@ -2191,7 +2192,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register nodes with all health checks in a critical state.
|
||||
{
|
||||
|
@ -2207,7 +2208,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
Check: &structs.HealthCheck{
|
||||
CheckID: "serf",
|
||||
Name: "serf",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2228,7 +2229,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
Check: &structs.HealthCheck{
|
||||
CheckID: "serf",
|
||||
Name: "serf",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
if err := srv.agent.RPC("Catalog.Register", args2, &out); err != nil {
|
||||
|
@ -2248,7 +2249,7 @@ func TestDNS_ServiceLookup_OnlyFailing(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
|
||||
|
@ -2308,7 +2309,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register nodes with health checks in various states.
|
||||
{
|
||||
|
@ -2325,7 +2326,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2347,7 +2348,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2368,7 +2369,7 @@ func TestDNS_ServiceLookup_OnlyPassing(t *testing.T) {
|
|||
CheckID: "db",
|
||||
Name: "db",
|
||||
ServiceID: "db",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -2431,7 +2432,7 @@ func TestDNS_ServiceLookup_Randomize(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a large number of nodes.
|
||||
for i := 0; i < generateNumNodes; i++ {
|
||||
|
@ -2526,7 +2527,7 @@ func TestDNS_ServiceLookup_Truncate(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a large number of nodes.
|
||||
for i := 0; i < generateNumNodes; i++ {
|
||||
|
@ -2595,7 +2596,7 @@ func TestDNS_ServiceLookup_LargeResponses(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
longServiceName := "this-is-a-very-very-very-very-very-long-name-for-a-service"
|
||||
|
||||
|
@ -2698,7 +2699,7 @@ func testDNS_ServiceLookup_responseLimits(t *testing.T, answerLimit int, qType u
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
for i := 0; i < generateNumNodes; i++ {
|
||||
nodeAddress := fmt.Sprintf("127.0.0.%d", i+1)
|
||||
|
@ -2847,7 +2848,7 @@ func TestDNS_ServiceLookup_CNAME(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a node with a name for an address.
|
||||
{
|
||||
|
@ -2948,7 +2949,7 @@ func TestDNS_NodeLookup_TTL(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register node
|
||||
args := &structs.RegisterRequest{
|
||||
|
@ -3069,7 +3070,7 @@ func TestDNS_ServiceLookup_TTL(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register node with 2 services
|
||||
args := &structs.RegisterRequest{
|
||||
|
@ -3172,7 +3173,7 @@ func TestDNS_PreparedQuery_TTL(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a node and a service.
|
||||
{
|
||||
|
@ -3365,8 +3366,8 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) {
|
|||
defer os.RemoveAll(dir2)
|
||||
defer srv2.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
|
||||
|
||||
// Join WAN cluster.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -3374,7 +3375,7 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) {
|
|||
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(srv1.agent.WANMembers()) > 1, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed waiting for WAN join: %v", err)
|
||||
|
@ -3465,7 +3466,7 @@ func TestDNS_ServiceLookup_SRV_RFC(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register node
|
||||
args := &structs.RegisterRequest{
|
||||
|
@ -3542,7 +3543,7 @@ func TestDNS_ServiceLookup_SRV_RFC_TCP_Default(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register node
|
||||
args := &structs.RegisterRequest{
|
||||
|
@ -3625,7 +3626,7 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a service
|
||||
args := &structs.RegisterRequest{
|
||||
|
@ -3675,7 +3676,7 @@ func TestDNS_AddressLookup(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Look up the addresses
|
||||
cases := map[string]string{
|
||||
|
@ -3714,7 +3715,7 @@ func TestDNS_AddressLookupIPV6(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Look up the addresses
|
||||
cases := map[string]string{
|
||||
|
@ -3784,7 +3785,7 @@ func TestDNS_NonExistingLookupEmptyAorAAAA(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a v6-only service and a v4-only service.
|
||||
{
|
||||
|
@ -3930,7 +3931,7 @@ func TestDNS_PreparedQuery_AllowStale(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
m := MockPreparedQuery{}
|
||||
if err := srv.agent.InjectEndpoint("PreparedQuery", &m); err != nil {
|
||||
|
@ -3975,7 +3976,7 @@ func TestDNS_InvalidQueries(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Try invalid forms of queries that should hit the special invalid case
|
||||
// of our query parser.
|
||||
|
@ -4015,7 +4016,7 @@ func TestDNS_PreparedQuery_AgentSource(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
m := MockPreparedQuery{}
|
||||
if err := srv.agent.InjectEndpoint("PreparedQuery", &m); err != nil {
|
||||
|
@ -4437,7 +4438,7 @@ func TestDNS_Compression_Query(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register a node with a service.
|
||||
{
|
||||
|
@ -4526,7 +4527,7 @@ func TestDNS_Compression_ReverseLookup(t *testing.T) {
|
|||
defer os.RemoveAll(dir)
|
||||
defer srv.agent.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
|
||||
|
||||
// Register node.
|
||||
args := &structs.RegisterRequest{
|
||||
|
|
|
@@ -5,6 +5,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -131,7 +132,7 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ
-if _, ok := params[structs.HealthPassing]; ok {
+if _, ok := params[api.HealthPassing]; ok {
@@ -161,7 +162,7 @@ OUTER:
-if check.Status != structs.HealthPassing {
+if check.Status != api.HealthPassing {

@@ -8,8 +8,9 @@ import (
+"github.com/hashicorp/consul/api"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -20,7 +21,7 @@ func TestHealthChecksInState(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -47,7 +48,7 @@ func TestHealthChecksInState(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -79,7 +80,7 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
@@ -92,7 +93,7 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -120,7 +121,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -129,7 +130,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
@@ -176,7 +177,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -205,7 +206,7 @@ func TestHealthNodeChecks(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -251,7 +252,7 @@ func TestHealthServiceChecks(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -313,7 +314,7 @@ func TestHealthServiceChecks_NodeMetaFilter(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -376,7 +377,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -437,7 +438,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -466,7 +467,7 @@ func TestHealthServiceNodes(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -547,7 +548,7 @@ func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -610,7 +611,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -671,7 +672,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -700,7 +701,7 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -711,7 +712,7 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
@@ -749,7 +750,7 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) {
-testutil.WaitForLeader(t, srv1.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv1.agent.RPC, "dc1")
@@ -759,7 +760,7 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) {
-testutil.WaitForLeader(t, srv2.agent.RPC, "dc2")
+testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
@@ -767,7 +768,7 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -839,27 +840,27 @@ func TestFilterNonPassing(t *testing.T) {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
-Status: structs.HealthPassing,
+Status: api.HealthPassing,

@@ -21,7 +21,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -45,7 +45,7 @@ func makeHTTPServerWithACLs(t *testing.T) (string, *HTTPServer) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -763,6 +763,6 @@ func httpTestWithConfig(t *testing.T, f func(srv *HTTPServer), cb func(c *Config
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")

@@ -8,7 +8,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -129,7 +129,7 @@ func TestAgentKeyring_ACL(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")

@@ -8,6 +8,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -141,7 +142,7 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
-Op: structs.KVSSet,
+Op: api.KVSet,
@@ -167,19 +168,19 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
-applyReq.Op = structs.KVSCAS
+applyReq.Op = api.KVCAS
-applyReq.Op = structs.KVSLock
+applyReq.Op = api.KVLock
-applyReq.Op = structs.KVSUnlock
+applyReq.Op = api.KVUnlock
@@ -203,7 +204,7 @@ func (s *HTTPServer) KVSPut(resp http.ResponseWriter, req *http.Request, args *s
-if applyReq.Op == structs.KVSSet {
+if applyReq.Op == api.KVSet {
@@ -217,7 +218,7 @@ func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args
-Op: structs.KVSDelete,
+Op: api.KVDelete,
@@ -227,7 +228,7 @@ func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args
-applyReq.Op = structs.KVSDeleteTree
+applyReq.Op = api.KVDeleteTree
@@ -239,7 +240,7 @@ func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args
-applyReq.Op = structs.KVSDeleteCAS
+applyReq.Op = api.KVDeleteCAS
@@ -249,7 +250,7 @@ func (s *HTTPServer) KVSDelete(resp http.ResponseWriter, req *http.Request, args
-if applyReq.Op == structs.KVSDeleteCAS {
+if applyReq.Op == api.KVDeleteCAS {

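For reference, a minimal standalone sketch of the verb selection implied by the KVSPut hunks above, written directly against the api.KVOp constants. The kvPutOp helper and the main function are illustrative assumptions, not part of the endpoint code, and the CAS index parsing and request plumbing are deliberately omitted:

package main

import (
	"fmt"
	"net/url"

	"github.com/hashicorp/consul/api"
)

// kvPutOp condenses the verb selection shown in the KVSPut hunks above:
// a plain PUT is api.KVSet, and the cas/acquire/release query parameters
// upgrade it to api.KVCAS, api.KVLock, or api.KVUnlock respectively.
func kvPutOp(params url.Values) api.KVOp {
	op := api.KVSet
	if _, ok := params["cas"]; ok {
		op = api.KVCAS
	}
	if _, ok := params["acquire"]; ok {
		op = api.KVLock
	}
	if _, ok := params["release"]; ok {
		op = api.KVUnlock
	}
	return op
}

func main() {
	// "my-session" is a made-up session ID for the example.
	params, _ := url.ParseQuery("acquire=my-session")
	fmt.Println(kvPutOp(params) == api.KVLock) // prints true
}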
@@ -10,7 +10,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -19,7 +19,7 @@ func TestKVSEndpoint_PUT_GET_DELETE(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -94,7 +94,7 @@ func TestKVSEndpoint_Recurse(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -190,7 +190,7 @@ func TestKVSEndpoint_DELETE_CAS(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -277,7 +277,7 @@ func TestKVSEndpoint_CAS(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -374,7 +374,7 @@ func TestKVSEndpoint_ListKeys(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")

@@ -9,6 +9,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -261,7 +262,7 @@ func (l *localState) UpdateCheck(checkID types.CheckID, status, output string) {
-if status == structs.HealthCritical {
+if status == api.HealthCritical {

@@ -7,8 +7,9 @@ import (
+"github.com/hashicorp/consul/api"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -18,7 +19,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -183,7 +184,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
-if err := testutil.WaitForResult(verifyServices); err != nil {
+if err := testrpc.WaitForResult(verifyServices); err != nil {
@@ -246,7 +247,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
-if err := testutil.WaitForResult(verifyServicesAfterRemove); err != nil {
+if err := testrpc.WaitForResult(verifyServicesAfterRemove); err != nil {
@@ -257,7 +258,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -350,7 +351,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
-if err := testutil.WaitForResult(verifyServices); err != nil {
+if err := testrpc.WaitForResult(verifyServices); err != nil {
@@ -361,7 +362,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -378,7 +379,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -429,7 +430,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -438,7 +439,7 @@ func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -499,7 +500,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -656,7 +657,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -671,7 +672,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -684,13 +685,13 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
-chk2_mod.Status = structs.HealthCritical
+chk2_mod.Status = api.HealthCritical
@@ -701,7 +702,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -710,7 +711,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -722,7 +723,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -737,7 +738,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -819,7 +820,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -880,7 +881,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -979,7 +980,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -990,7 +991,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -999,7 +1000,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1057,7 +1058,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1120,14 +1121,14 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -1142,7 +1143,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1158,7 +1159,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-agent.state.UpdateCheck("web", structs.HealthPassing, "output")
+agent.state.UpdateCheck("web", api.HealthPassing, "output")
@@ -1177,7 +1178,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1262,7 +1263,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-agent.state.UpdateCheck("web", structs.HealthPassing, "deferred")
+agent.state.UpdateCheck("web", api.HealthPassing, "deferred")
@@ -1283,7 +1284,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1312,7 +1313,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -1335,7 +1336,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1365,7 +1366,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -1462,7 +1463,7 @@ func TestAgent_checkCriticalTime(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -1470,13 +1471,13 @@ func TestAgent_checkCriticalTime(t *testing.T) {
-l.UpdateCheck(checkID, structs.HealthWarning, "")
+l.UpdateCheck(checkID, api.HealthWarning, "")
-l.UpdateCheck(checkID, structs.HealthCritical, "")
+l.UpdateCheck(checkID, api.HealthCritical, "")
@@ -1486,7 +1487,7 @@ func TestAgent_checkCriticalTime(t *testing.T) {
-l.UpdateCheck(chk.CheckID, structs.HealthCritical, "")
+l.UpdateCheck(chk.CheckID, api.HealthCritical, "")
@@ -1495,14 +1496,14 @@ func TestAgent_checkCriticalTime(t *testing.T) {
-l.UpdateCheck(checkID, structs.HealthPassing, "")
+l.UpdateCheck(checkID, api.HealthPassing, "")
-l.UpdateCheck(checkID, structs.HealthCritical, "")
+l.UpdateCheck(checkID, api.HealthCritical, "")
@@ -1553,14 +1554,14 @@ func TestAgent_sendCoordinate(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {

@@ -12,6 +12,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -302,7 +303,7 @@ func (a *Agent) remoteExecWriteKey(event *remoteExecEvent, suffix string, val []
-Op: structs.KVSLock,
+Op: api.KVLock,

@@ -9,8 +9,9 @@ import (
+"github.com/hashicorp/consul/api"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -110,7 +111,7 @@ func testRemoteExecGetSpec(t *testing.T, c *Config) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -156,7 +157,7 @@ func testRemoteExecWrites(t *testing.T, c *Config) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -210,7 +211,7 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -305,7 +306,7 @@ func destroySession(t *testing.T, agent *Agent, session string) {
-Op: structs.KVSSet,
+Op: api.KVSet,

@@ -8,6 +8,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -25,7 +26,7 @@ func TestSessionCreate(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
@@ -73,7 +74,7 @@ func TestSessionCreateDelete(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,

@@ -4,7 +4,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -13,7 +13,7 @@ func TestStatusLeader(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")

@@ -1,6 +1,7 @@
+"github.com/hashicorp/consul/api"
@@ -59,7 +60,7 @@ func (c *CheckDefinition) HealthCheck(node string) *structs.HealthCheck {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,

@@ -4,7 +4,7 @@ import (
-"github.com/hashicorp/consul/consul/structs"
+"github.com/hashicorp/consul/api"
@@ -12,7 +12,7 @@ func TestAgentStructs_HealthCheck(t *testing.T) {
-if check.Status != structs.HealthCritical {
+if check.Status != api.HealthCritical {

@@ -82,6 +82,15 @@ func fixupKVOps(raw interface{}) error {
+// isWrite returns true if the given operation alters the state store.
+func isWrite(op api.KVOp) bool {
+switch op {
+case api.KVSet, api.KVDelete, api.KVDeleteCAS, api.KVDeleteTree, api.KVCAS, api.KVLock, api.KVUnlock:
+return true
+}
+return false
+}
+
@@ -125,8 +134,8 @@ func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (st
-verb := structs.KVSOp(in.KV.Verb)
-if verb.IsWrite() {
+verb := api.KVOp(in.KV.Verb)
+if isWrite(verb) {

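For reference, a small runnable sketch of how the isWrite helper introduced above classifies api.KVOp verbs when counting writes in a transaction. The ops slice in main is a made-up example and is not taken from the endpoint code:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// isWrite mirrors the helper added above: it reports whether a transaction
// verb mutates the KV state store.
func isWrite(op api.KVOp) bool {
	switch op {
	case api.KVSet, api.KVDelete, api.KVDeleteCAS, api.KVDeleteTree, api.KVCAS, api.KVLock, api.KVUnlock:
		return true
	}
	return false
}

func main() {
	// Hypothetical transaction: one read verb and one write verb.
	ops := []api.KVOp{api.KVGet, api.KVSet}
	writes := 0
	for _, op := range ops {
		if isWrite(op) {
			writes++
		}
	}
	fmt.Println(writes) // prints 1
}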
@@ -6,6 +6,7 @@ import (
+"github.com/hashicorp/consul/api"
@@ -156,11 +157,11 @@ func summarizeServices(dump structs.NodeDump) []*ServiceSummary {
-case structs.HealthPassing:
+case api.HealthPassing:
-case structs.HealthWarning:
+case api.HealthWarning:
-case structs.HealthCritical:
+case api.HealthCritical:

@@ -12,8 +12,9 @@ import (
+"github.com/hashicorp/consul/api"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -73,7 +74,7 @@ func TestUiNodes(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -117,7 +118,7 @@ func TestUiNodeInfo(t *testing.T) {
-testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
+testrpc.WaitForLeader(t, srv.agent.RPC, "dc1")
@@ -187,15 +188,15 @@ func TestSummarizeServices(t *testing.T) {
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
-Status: structs.HealthWarning,
+Status: api.HealthWarning,
@@ -210,7 +211,7 @@ func TestSummarizeServices(t *testing.T) {
-Status: structs.HealthCritical,
+Status: api.HealthCritical,

@@ -6,7 +6,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -153,7 +153,7 @@ func TestFireReceiveEvent(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")
@@ -175,7 +175,7 @@ func TestFireReceiveEvent(t *testing.T) {
-if err := testutil.WaitForResult(func() (bool, error) {
+if err := testrpc.WaitForResult(func() (bool, error) {
@@ -197,7 +197,7 @@ func TestUserEventToken(t *testing.T) {
-testutil.WaitForLeader(t, agent.RPC, "dc1")
+testrpc.WaitForLeader(t, agent.RPC, "dc1")

@@ -8,7 +8,7 @@ import (
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
@@ -22,7 +22,7 @@ func TestACLEndpoint_Apply(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -82,7 +82,7 @@ func TestACLEndpoint_Update_PurgeCache(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -160,7 +160,7 @@ func TestACLEndpoint_Apply_CustomID(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -206,7 +206,7 @@ func TestACLEndpoint_Apply_Denied(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -233,7 +233,7 @@ func TestACLEndpoint_Apply_DeleteAnon(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -262,7 +262,7 @@ func TestACLEndpoint_Apply_RootChange(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -291,7 +291,7 @@ func TestACLEndpoint_Get(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -338,7 +338,7 @@ func TestACLEndpoint_GetPolicy(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -395,7 +395,7 @@ func TestACLEndpoint_List(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -455,7 +455,7 @@ func TestACLEndpoint_List_Denied(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -478,7 +478,7 @@ func TestACLEndpoint_ReplicationStatus(t *testing.T) {
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")

@ -11,7 +11,7 @@ import (
"time"

"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
)

func TestACLReplication_Sorter(t *testing.T) {

@ -228,7 +228,7 @@ func TestACLReplication_updateLocalACLs_RateLimit(t *testing.T) {
|
|||
})
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
|
||||
changes := structs.ACLRequests{
|
||||
&structs.ACLRequest{
|
||||
|
@ -342,8 +342,8 @@ func TestACLReplication(t *testing.T) {
|
|||
if _, err := s2.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
|
||||
// Create a bunch of new tokens.
|
||||
var id string
|
||||
|
@ -395,7 +395,7 @@ func TestACLReplication(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for the replica to converge.
|
||||
if err := testutil.WaitForResult(checkSame); err != nil {
|
||||
if err := testrpc.WaitForResult(checkSame); err != nil {
|
||||
t.Fatalf("ACLs didn't converge")
|
||||
}
|
||||
|
||||
|
@ -418,7 +418,7 @@ func TestACLReplication(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for the replica to converge.
|
||||
if err := testutil.WaitForResult(checkSame); err != nil {
|
||||
if err := testrpc.WaitForResult(checkSame); err != nil {
|
||||
t.Fatalf("ACLs didn't converge")
|
||||
}
|
||||
|
||||
|
@ -437,7 +437,7 @@ func TestACLReplication(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for the replica to converge.
|
||||
if err := testutil.WaitForResult(checkSame); err != nil {
|
||||
if err := testrpc.WaitForResult(checkSame); err != nil {
|
||||
t.Fatalf("ACLs didn't converge")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
)

var testACLPolicy = `

@ -29,7 +29,7 @@ func TestACL_Disabled(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
acl, err := s1.resolveToken("does not exist")
|
||||
if err != nil {
|
||||
|
@ -73,7 +73,7 @@ func TestACL_Authority_NotFound(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
acl, err := s1.resolveToken("does not exist")
|
||||
if err == nil || err.Error() != aclNotFound {
|
||||
|
@ -94,7 +94,7 @@ func TestACL_Authority_Found(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -139,7 +139,7 @@ func TestACL_Authority_Anonymous_Found(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Resolve the token
|
||||
acl, err := s1.resolveToken("")
|
||||
|
@ -166,7 +166,7 @@ func TestACL_Authority_Master_Found(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Resolve the token
|
||||
acl, err := s1.resolveToken("foobar")
|
||||
|
@ -194,7 +194,7 @@ func TestACL_Authority_Management(t *testing.T) {
|
|||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Resolve the token
|
||||
acl, err := s1.resolveToken("foobar")
|
||||
|
@ -232,7 +232,7 @@ func TestACL_NonAuthority_NotFound(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
|
@ -241,7 +241,7 @@ func TestACL_NonAuthority_NotFound(t *testing.T) {
|
|||
|
||||
client := rpcClient(t, s1)
|
||||
defer client.Close()
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// find the non-authoritative server
|
||||
var nonAuth *Server
|
||||
|
@ -284,13 +284,13 @@ func TestACL_NonAuthority_Found(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -360,13 +360,13 @@ func TestACL_NonAuthority_Management(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// find the non-authoritative server
|
||||
var nonAuth *Server
|
||||
|
@ -417,13 +417,13 @@ func TestACL_DownPolicy_Deny(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -491,13 +491,13 @@ func TestACL_DownPolicy_Allow(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -567,13 +567,13 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ := s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -666,9 +666,9 @@ func TestACL_Replication(t *testing.T) {
|
|||
if _, err := s3.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc3")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc3")
|
||||
|
||||
// Create a new token.
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -687,7 +687,7 @@ func TestACL_Replication(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for replication to occur.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, acl, err := s2.fsm.State().ACLGet(nil, id)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -770,8 +770,8 @@ func TestACL_MultiDC_Found(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)

@ -49,7 +49,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -65,7 +65,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
|
|||
// Kill a non-leader server
|
||||
s3.Shutdown()
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
alive := 0
|
||||
for _, m := range s1.LANMembers() {
|
||||
if m.Status == serf.StatusAlive {
|
||||
|
@ -85,7 +85,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
|
|||
|
||||
// Make sure the dead server is removed and we're back to 3 total peers
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -131,7 +131,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 4, nil
|
||||
}); err != nil {
|
||||
|
@ -144,7 +144,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
|
|||
|
||||
// Should be removed from the peers automatically
|
||||
for _, s := range []*Server{s1, s2, s3} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -183,7 +183,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -191,7 +191,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Add s4 to peers directly
|
||||
s4addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -209,7 +209,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
|
|||
|
||||
// Wait for s4 to be removed
|
||||
for _, s := range []*Server{s1, s2, s3} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -245,12 +245,12 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Wait for the new server to be added as a non-voter, but make sure
|
||||
// it doesn't get promoted to a voter even after ServerStabilizationTime,
|
||||
// because that would result in an even-numbered quorum count.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
future := s1.raft.GetConfiguration()
|
||||
if err := future.Error(); err != nil {
|
||||
return false, err
|
||||
|
@ -292,7 +292,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
future := s1.raft.GetConfiguration()
|
||||
if err := future.Error(); err != nil {
|
||||
return false, err
|
||||
|
|
|
@ -8,9 +8,10 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/net-rpc-msgpackrpc"
)

@ -91,7 +92,7 @@ func TestCatalog_Register_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL.
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -200,8 +201,8 @@ func TestCatalog_Register_ForwardLeader(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the follower as the client
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -245,7 +246,7 @@ func TestCatalog_Register_ForwardDC(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc2")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc2", // Should forward through s1
|
||||
|
@ -281,7 +282,7 @@ func TestCatalog_Deregister(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -300,7 +301,7 @@ func TestCatalog_Deregister_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL.
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -517,7 +518,7 @@ func TestCatalog_ListDatacenters(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
var out []string
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListDatacenters", struct{}{}, &out); err != nil {
|
||||
|
@ -560,7 +561,7 @@ func TestCatalog_ListDatacenters_DistanceSort(t *testing.T) {
|
|||
if _, err := s3.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
var out []string
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListDatacenters", struct{}{}, &out); err != nil {
|
||||
|
@ -596,14 +597,14 @@ func TestCatalog_ListNodes(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
|
||||
return len(out.Nodes) == 2, nil
|
||||
}); err != nil {
|
||||
|
@ -629,7 +630,7 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Add a new node with the right meta k/v pair
|
||||
node := &structs.Node{Node: "foo", Address: "127.0.0.1", Meta: map[string]string{"somekey": "somevalue"}}
|
||||
|
@ -646,7 +647,7 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
|
|||
}
|
||||
var out structs.IndexedNodes
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
|
||||
return len(out.Nodes) == 1, nil
|
||||
}); err != nil {
|
||||
|
@ -678,7 +679,7 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
|
|||
}
|
||||
|
||||
// Should get an empty list of nodes back
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
|
||||
return len(out.Nodes) == 0, nil
|
||||
}); err != nil {
|
||||
|
@ -706,8 +707,8 @@ func TestCatalog_ListNodes_StaleRaad(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the follower as the client
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -774,8 +775,8 @@ func TestCatalog_ListNodes_ConsistentRead_Fail(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the leader as the client, kill the follower
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -824,8 +825,8 @@ func TestCatalog_ListNodes_ConsistentRead(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the leader as the client, kill the follower
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -859,7 +860,7 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "aaa", Address: "127.0.0.1"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -889,7 +890,7 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
|
|||
Datacenter: "dc1",
|
||||
}
|
||||
var out structs.IndexedNodes
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
|
||||
return len(out.Nodes) == 5, nil
|
||||
}); err != nil {
|
||||
|
@ -917,7 +918,7 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
|
|||
Datacenter: "dc1",
|
||||
Source: structs.QuerySource{Datacenter: "dc1", Node: "foo"},
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
|
||||
return len(out.Nodes) == 5, nil
|
||||
}); err != nil {
|
||||
|
@ -952,7 +953,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// We scope the reply in each of these since msgpack won't clear out an
|
||||
// existing slice if the incoming one is nil, so it's best to start
|
||||
|
@ -1056,7 +1057,7 @@ func TestCatalog_ListServices(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
|
@ -1097,7 +1098,7 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Add a new node with the right meta k/v pair
|
||||
node := &structs.Node{Node: "foo", Address: "127.0.0.1", Meta: map[string]string{"somekey": "somevalue"}}
|
||||
|
@ -1165,7 +1166,7 @@ func TestCatalog_ListServices_Blocking(t *testing.T) {
|
|||
}
|
||||
var out structs.IndexedServices
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Run the query
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
|
||||
|
@ -1223,7 +1224,7 @@ func TestCatalog_ListServices_Timeout(t *testing.T) {
|
|||
}
|
||||
var out structs.IndexedServices
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Run the query
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
|
||||
|
@ -1308,7 +1309,7 @@ func TestCatalog_ListServiceNodes(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
|
@ -1345,7 +1346,7 @@ func TestCatalog_ListServiceNodes_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Add 2 nodes with specific meta maps
|
||||
node := &structs.Node{Node: "foo", Address: "127.0.0.1", Meta: map[string]string{"somekey": "somevalue", "common": "1"}}
|
||||
|
@ -1455,7 +1456,7 @@ func TestCatalog_ListServiceNodes_DistanceSort(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Add a few nodes for the associated services.
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "aaa", Address: "127.0.0.1"})
|
||||
|
@ -1542,7 +1543,7 @@ func TestCatalog_NodeServices(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
|
@ -1599,7 +1600,7 @@ func TestCatalog_Register_FailedCase1(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1630,7 +1631,7 @@ func testACLFilterServer(t *testing.T) (dir, token string, srv *Server, codec rp
|
|||
})
|
||||
|
||||
codec = rpcClient(t, srv)
|
||||
testutil.WaitForLeader(t, srv.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.RPC, "dc1")
|
||||
|
||||
// Create a new token
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -1664,7 +1665,7 @@ service "foo" {
|
|||
CheckID: "service:foo",
|
||||
Name: "service:foo",
|
||||
ServiceID: "foo",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: "root"},
|
||||
}
|
||||
|
@ -1777,7 +1778,7 @@ func TestCatalog_NodeServices_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Prior to version 8, the node policy should be ignored.
|
||||
args := structs.NodeSpecificRequest{
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
"time"

"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/serf"
)

@ -84,14 +84,14 @@ func TestClient_JoinLAN(t *testing.T) {
|
|||
if _, err := c1.JoinLAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return c1.servers.NumServers() == 1, nil
|
||||
}); err != nil {
|
||||
t.Fatal("expected consul server")
|
||||
}
|
||||
|
||||
// Check the members
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
server_check := len(s1.LANMembers()) == 2
|
||||
client_check := len(c1.LANMembers()) == 2
|
||||
return server_check && client_check, nil
|
||||
|
@ -100,7 +100,7 @@ func TestClient_JoinLAN(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check we have a new consul
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return c1.servers.NumServers() == 1, nil
|
||||
}); err != nil {
|
||||
t.Fatal("expected consul server")
|
||||
|
@ -189,7 +189,7 @@ func TestClient_RPC(t *testing.T) {
|
|||
}
|
||||
|
||||
// RPC should succeed
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
err := c1.RPC("Status.Ping", struct{}{}, &out)
|
||||
return err == nil, err
|
||||
}); err != nil {
|
||||
|
@ -214,7 +214,7 @@ func TestClient_RPC_Pool(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for both agents to finish joining
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.LANMembers()) == 2 && len(c1.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Server has %v of %v expected members; Client has %v of %v expected members.",
|
||||
|
@ -230,7 +230,7 @@ func TestClient_RPC_Pool(t *testing.T) {
|
|||
go func() {
|
||||
defer wg.Done()
|
||||
var out struct{}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
err := c1.RPC("Status.Ping", struct{}{}, &out)
|
||||
return err == nil, err
|
||||
}); err != nil {
|
||||
|
@ -345,7 +345,7 @@ func TestClient_RPC_TLS(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for joins to finish/RPC to succeed
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
if len(s1.LANMembers()) != 2 {
|
||||
return false, fmt.Errorf("bad len: %v", len(s1.LANMembers()))
|
||||
}
|
||||
|
@ -371,7 +371,7 @@ func TestClient_SnapshotRPC(t *testing.T) {
|
|||
defer c1.Shutdown()
|
||||
|
||||
// Wait for the leader
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to join.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -384,7 +384,7 @@ func TestClient_SnapshotRPC(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait until we've got a healthy server.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return c1.servers.NumServers() == 1, nil
|
||||
}); err != nil {
|
||||
t.Fatal("expected consul server")
|
||||
|
@ -430,7 +430,7 @@ func TestClient_SnapshotRPC_TLS(t *testing.T) {
|
|||
defer c1.Shutdown()
|
||||
|
||||
// Wait for the leader
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to join.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -443,7 +443,7 @@ func TestClient_SnapshotRPC_TLS(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait until we've got a healthy server.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return c1.servers.NumServers() == 1, nil
|
||||
}); err != nil {
|
||||
t.Fatal("expected consul server")
|
||||
|
@ -493,10 +493,10 @@ func TestClientServer_UserEvent(t *testing.T) {
|
|||
}
|
||||
|
||||
// Wait for the leader
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Check the members
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(c1.LANMembers()) == 2 && len(s1.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
"time"

"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/coordinate"
)

@ -55,7 +55,7 @@ func TestCoordinate_Update(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Register some nodes.
|
||||
nodes := []string{"node1", "node2"}
|
||||
|
@ -199,7 +199,7 @@ func TestCoordinate_Update_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Register some nodes.
|
||||
nodes := []string{"node1", "node2"}
|
||||
|
@ -275,7 +275,7 @@ func TestCoordinate_ListDatacenters(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// It's super hard to force the Serfs into a known configuration of
|
||||
// coordinates, so the best we can do is make sure our own DC shows
|
||||
|
@ -305,7 +305,7 @@ func TestCoordinate_ListNodes(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Register some nodes.
|
||||
nodes := []string{"foo", "bar", "baz"}
|
||||
|
@ -351,7 +351,7 @@ func TestCoordinate_ListNodes(t *testing.T) {
|
|||
}
|
||||
|
||||
// Now query back for all the nodes.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
arg := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
|
@ -386,7 +386,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Register some nodes.
|
||||
nodes := []string{"foo", "bar", "baz"}
|
||||
|
@ -446,7 +446,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
|
|||
// Wait for all the coordinate updates to apply. Since we aren't
|
||||
// enforcing version 8 ACLs, this should also allow us to read
|
||||
// everything back without a token.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
arg := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
"time"

"github.com/armon/go-metrics"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/state"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/go-msgpack/codec"

@ -168,34 +169,34 @@ func (c *consulFSM) applyKVSOperation(buf []byte, index uint64) interface{} {
}
defer metrics.MeasureSince([]string{"consul", "fsm", "kvs", string(req.Op)}, time.Now())
switch req.Op {
case structs.KVSSet:
case api.KVSet:
return c.state.KVSSet(index, &req.DirEnt)
case structs.KVSDelete:
case api.KVDelete:
return c.state.KVSDelete(index, req.DirEnt.Key)
case structs.KVSDeleteCAS:
case api.KVDeleteCAS:
act, err := c.state.KVSDeleteCAS(index, req.DirEnt.ModifyIndex, req.DirEnt.Key)
if err != nil {
return err
} else {
return act
}
case structs.KVSDeleteTree:
case api.KVDeleteTree:
return c.state.KVSDeleteTree(index, req.DirEnt.Key)
case structs.KVSCAS:
case api.KVCAS:
act, err := c.state.KVSSetCAS(index, &req.DirEnt)
if err != nil {
return err
} else {
return act
}
case structs.KVSLock:
case api.KVLock:
act, err := c.state.KVSLock(index, &req.DirEnt)
if err != nil {
return err
} else {
return act
}
case structs.KVSUnlock:
case api.KVUnlock:
act, err := c.state.KVSUnlock(index, &req.DirEnt)
if err != nil {
return err

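The switch above now dispatches on api.KVOp values in place of the removed structs.KVS* constants. A small illustrative sketch (not part of this commit): the verbs are plain string constants, which is why string(req.Op) in the metrics call above yields a usable name:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The verbs handled by applyKVSOperation, now spelled as api.KVOp constants.
	ops := []api.KVOp{
		api.KVSet, api.KVDelete, api.KVDeleteCAS, api.KVDeleteTree,
		api.KVCAS, api.KVLock, api.KVUnlock,
	}
	for _, op := range ops {
		// Print the underlying string value, the same one used in the metrics key.
		fmt.Println(string(op))
	}
}
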
@ -7,13 +7,15 @@ import (
"reflect"
"testing"

"time"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/state"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/raft"
"time"
)

type MockSink struct {

@ -114,7 +116,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "db",
|
||||
Name: "db connectivity",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -231,7 +233,7 @@ func TestFSM_DeregisterCheck(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "mem",
|
||||
Name: "memory util",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
buf, err := structs.Encode(structs.RegisterRequestType, req)
|
||||
|
@ -298,7 +300,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "db",
|
||||
Name: "db connectivity",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -371,7 +373,7 @@ func TestFSM_SnapshotRestore(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "web",
|
||||
Name: "web connectivity",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "web",
|
||||
})
|
||||
fsm.state.KVSSet(8, &structs.DirEntry{
|
||||
|
@ -657,7 +659,7 @@ func TestFSM_KVSDelete(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Flags: 0,
|
||||
|
@ -674,7 +676,7 @@ func TestFSM_KVSDelete(t *testing.T) {
|
|||
}
|
||||
|
||||
// Run the delete
|
||||
req.Op = structs.KVSDelete
|
||||
req.Op = api.KVDelete
|
||||
buf, err = structs.Encode(structs.KVSRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -702,7 +704,7 @@ func TestFSM_KVSDeleteTree(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Flags: 0,
|
||||
|
@ -719,7 +721,7 @@ func TestFSM_KVSDeleteTree(t *testing.T) {
|
|||
}
|
||||
|
||||
// Run the delete tree
|
||||
req.Op = structs.KVSDeleteTree
|
||||
req.Op = api.KVDeleteTree
|
||||
req.DirEnt.Key = "/test"
|
||||
buf, err = structs.Encode(structs.KVSRequestType, req)
|
||||
if err != nil {
|
||||
|
@ -748,7 +750,7 @@ func TestFSM_KVSDeleteCheckAndSet(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Flags: 0,
|
||||
|
@ -774,7 +776,7 @@ func TestFSM_KVSDeleteCheckAndSet(t *testing.T) {
|
|||
}
|
||||
|
||||
// Run the check-and-set
|
||||
req.Op = structs.KVSDeleteCAS
|
||||
req.Op = api.KVDeleteCAS
|
||||
req.DirEnt.ModifyIndex = d.ModifyIndex
|
||||
buf, err = structs.Encode(structs.KVSRequestType, req)
|
||||
if err != nil {
|
||||
|
@ -803,7 +805,7 @@ func TestFSM_KVSCheckAndSet(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Flags: 0,
|
||||
|
@ -829,7 +831,7 @@ func TestFSM_KVSCheckAndSet(t *testing.T) {
|
|||
}
|
||||
|
||||
// Run the check-and-set
|
||||
req.Op = structs.KVSCAS
|
||||
req.Op = api.KVCAS
|
||||
req.DirEnt.ModifyIndex = d.ModifyIndex
|
||||
req.DirEnt.Value = []byte("zip")
|
||||
buf, err = structs.Encode(structs.KVSRequestType, req)
|
||||
|
@ -901,7 +903,7 @@ func TestFSM_SessionCreate_Destroy(t *testing.T) {
|
|||
fsm.state.EnsureCheck(2, &structs.HealthCheck{
|
||||
Node: "foo",
|
||||
CheckID: "web",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
})
|
||||
|
||||
// Create a new session
|
||||
|
@ -982,7 +984,7 @@ func TestFSM_KVSLock(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSLock,
|
||||
Op: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Value: []byte("test"),
|
||||
|
@ -1026,7 +1028,7 @@ func TestFSM_KVSUnlock(t *testing.T) {
|
|||
|
||||
req := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSLock,
|
||||
Op: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Value: []byte("test"),
|
||||
|
@ -1044,7 +1046,7 @@ func TestFSM_KVSUnlock(t *testing.T) {
|
|||
|
||||
req = structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSUnlock,
|
||||
Op: api.KVUnlock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Value: []byte("test"),
|
||||
|
@ -1304,7 +1306,7 @@ func TestFSM_Txn(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSSet,
|
||||
Verb: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/path",
|
||||
Flags: 0,
|
||||
|
|
|
@ -5,9 +5,10 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/net-rpc-msgpackrpc"
)

@ -18,7 +19,7 @@ func TestHealth_ChecksInState(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -26,7 +27,7 @@ func TestHealth_ChecksInState(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
|
@ -37,7 +38,7 @@ func TestHealth_ChecksInState(t *testing.T) {
|
|||
var out2 structs.IndexedHealthChecks
|
||||
inState := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: structs.HealthPassing,
|
||||
State: api.HealthPassing,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out2); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -64,7 +65,7 @@ func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -76,7 +77,7 @@ func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
|
@ -92,7 +93,7 @@ func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "disk space",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
|
||||
|
@ -133,7 +134,7 @@ func TestHealth_ChecksInState_NodeMetaFilter(t *testing.T) {
|
|||
inState := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
NodeMetaFilters: tc.filters,
|
||||
State: structs.HealthPassing,
|
||||
State: api.HealthPassing,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &inState, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -159,7 +160,7 @@ func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -180,7 +181,7 @@ func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -198,7 +199,7 @@ func TestHealth_ChecksInState_DistanceSort(t *testing.T) {
|
|||
var out2 structs.IndexedHealthChecks
|
||||
inState := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: structs.HealthPassing,
|
||||
State: api.HealthPassing,
|
||||
Source: structs.QuerySource{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
|
@ -236,7 +237,7 @@ func TestHealth_NodeChecks(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -244,7 +245,7 @@ func TestHealth_NodeChecks(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
|
@ -277,7 +278,7 @@ func TestHealth_ServiceChecks(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -289,7 +290,7 @@ func TestHealth_ServiceChecks(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -323,7 +324,7 @@ func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -339,7 +340,7 @@ func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -360,7 +361,7 @@ func TestHealth_ServiceChecks_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "disk space",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -428,7 +429,7 @@ func TestHealth_ServiceChecks_DistanceSort(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -453,7 +454,7 @@ func TestHealth_ServiceChecks_DistanceSort(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -516,7 +517,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -529,7 +530,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -549,7 +550,7 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -584,10 +585,10 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
|||
if !lib.StrContains(nodes[1].Service.Tags, "master") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != structs.HealthWarning {
|
||||
if nodes[0].Checks[0].Status != api.HealthWarning {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[1].Checks[0].Status != structs.HealthPassing {
|
||||
if nodes[1].Checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
}
|
||||
|
@ -599,7 +600,7 @@ func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -615,7 +616,7 @@ func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "memory utilization",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -637,7 +638,7 @@ func TestHealth_ServiceNodes_NodeMetaFilter(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "disk space",
|
||||
Status: structs.HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -729,7 +730,7 @@ func TestHealth_ServiceNodes_DistanceSort(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.2"}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -754,7 +755,7 @@ func TestHealth_ServiceNodes_DistanceSort(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -930,7 +931,7 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
|
|||
|
||||
opt := structs.ChecksInStateRequest{
|
||||
Datacenter: "dc1",
|
||||
State: structs.HealthPassing,
|
||||
State: api.HealthPassing,
|
||||
QueryOptions: structs.QueryOptions{Token: token},
|
||||
}
|
||||
reply := structs.IndexedHealthChecks{}
|
||||
|
|
|
@ -6,9 +6,10 @@ import (
"os"
"testing"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/net-rpc-msgpackrpc"
)

@ -19,7 +20,7 @@ func TestInternal_NodeInfo(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -32,7 +33,7 @@ func TestInternal_NodeInfo(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -60,7 +61,7 @@ func TestInternal_NodeInfo(t *testing.T) {
|
|||
if !lib.StrContains(nodes[0].Services[0].Tags, "master") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != structs.HealthPassing {
|
||||
if nodes[0].Checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
}
|
||||
|
@ -72,7 +73,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -85,7 +86,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -105,7 +106,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: structs.HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -134,7 +135,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
if !lib.StrContains(node.Services[0].Tags, "master") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if node.Checks[0].Status != structs.HealthPassing {
|
||||
if node.Checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
|
||||
|
@ -143,7 +144,7 @@ func TestInternal_NodeDump(t *testing.T) {
|
|||
if !lib.StrContains(node.Services[0].Tags, "slave") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if node.Checks[0].Status != structs.HealthWarning {
|
||||
if node.Checks[0].Status != api.HealthWarning {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
|
||||
|
@ -171,7 +172,7 @@ func TestInternal_KeyringOperation(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
var out structs.KeyringResponses
|
||||
req := structs.KeyringRequest{
|
||||
|
@ -354,7 +355,7 @@ func TestInternal_EventFire_Token(t *testing.T) {
|
|||
codec := rpcClient(t, srv)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, srv.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, srv.RPC, "dc1")
|
||||
|
||||
// No token is rejected
|
||||
event := structs.EventFireRequest{
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
"reflect"
"testing"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
)

@ -28,7 +29,7 @@ func TestHealthCheckRace(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "db",
|
||||
Name: "db connectivity",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
|
@ -54,7 +55,7 @@ func TestHealthCheckRace(t *testing.T) {
|
|||
}
|
||||
|
||||
// Update the check state
|
||||
req.Check.Status = structs.HealthCritical
|
||||
req.Check.Status = api.HealthCritical
|
||||
buf, err = structs.Encode(structs.RegisterRequestType, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -6,6 +6,7 @@ import (

"github.com/armon/go-metrics"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/state"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/go-memdb"

@ -19,24 +20,24 @@ type KVS struct {
// preApply does all the verification of a KVS update that is performed BEFORE
// we submit as a Raft log entry. This includes enforcing the lock delay which
// must only be done on the leader.
func kvsPreApply(srv *Server, acl acl.ACL, op structs.KVSOp, dirEnt *structs.DirEntry) (bool, error) {
func kvsPreApply(srv *Server, acl acl.ACL, op api.KVOp, dirEnt *structs.DirEntry) (bool, error) {
// Verify the entry.
if dirEnt.Key == "" && op != structs.KVSDeleteTree {
if dirEnt.Key == "" && op != api.KVDeleteTree {
return false, fmt.Errorf("Must provide key")
}

// Apply the ACL policy if any.
if acl != nil {
switch op {
case structs.KVSDeleteTree:
case api.KVDeleteTree:
if !acl.KeyWritePrefix(dirEnt.Key) {
return false, permissionDeniedErr
}

case structs.KVSGet, structs.KVSGetTree:
case api.KVGet, api.KVGetTree:
// Filtering for GETs is done on the output side.

case structs.KVSCheckSession, structs.KVSCheckIndex:
case api.KVCheckSession, api.KVCheckIndex:
// These could reveal information based on the outcome
// of the transaction, and they operate on individual
// keys so we check them here.

@ -57,7 +58,7 @@ func kvsPreApply(srv *Server, acl acl.ACL, op structs.KVSOp, dirEnt *structs.Dir
// after the raft log is committed as it would lead to inconsistent FSMs.
// Instead, the lock-delay must be enforced before commit. This means that
// only the wall-time of the leader node is used, preventing any inconsistencies.
if op == structs.KVSLock {
if op == api.KVLock {
state := srv.fsm.State()
expires := state.KVSLockDelay(dirEnt.Key)
if expires.After(time.Now()) {

@@ -6,8 +6,9 @@ import (
 "testing"
 "time"

+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/net-rpc-msgpackrpc"
 )

@@ -18,11 +19,11 @@ func TestKVS_Apply(t *testing.T) {
 codec := rpcClient(t, s1)
 defer codec.Close()

-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")

 arg := structs.KVSRequest{
 Datacenter: "dc1",
-Op: structs.KVSSet,
+Op: api.KVSet,
 DirEnt: structs.DirEntry{
 Key: "test",
 Flags: 42,
|
@ -45,7 +46,7 @@ func TestKVS_Apply(t *testing.T) {
|
|||
}
|
||||
|
||||
// Do a check and set
|
||||
arg.Op = structs.KVSCAS
|
||||
arg.Op = api.KVCAS
|
||||
arg.DirEnt.ModifyIndex = d.ModifyIndex
|
||||
arg.DirEnt.Flags = 43
|
||||
if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
|
||||
|
@ -78,7 +79,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL
|
||||
arg := structs.ACLRequest{
|
||||
|
@ -100,7 +101,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) {
|
|||
// Try a write
|
||||
argR := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar",
|
||||
Flags: 42,
|
||||
|
@ -117,7 +118,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) {
|
|||
// Try a recursive delete
|
||||
argR = structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSDeleteTree,
|
||||
Op: api.KVDeleteTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
},
|
||||
|
@ -136,11 +137,11 @@ func TestKVS_Get(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Flags: 42,
|
||||
|
@ -187,11 +188,11 @@ func TestKVS_Get_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "zip",
|
||||
Flags: 42,
|
||||
|
@ -228,7 +229,7 @@ func TestKVSEndpoint_List(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
keys := []string{
|
||||
"/test/key1",
|
||||
|
@ -239,7 +240,7 @@ func TestKVSEndpoint_List(t *testing.T) {
|
|||
for _, key := range keys {
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: key,
|
||||
Flags: 1,
|
||||
|
@ -299,7 +300,7 @@ func TestKVSEndpoint_List_Blocking(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
keys := []string{
|
||||
"/test/key1",
|
||||
|
@ -310,7 +311,7 @@ func TestKVSEndpoint_List_Blocking(t *testing.T) {
|
|||
for _, key := range keys {
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: key,
|
||||
Flags: 1,
|
||||
|
@ -343,7 +344,7 @@ func TestKVSEndpoint_List_Blocking(t *testing.T) {
|
|||
defer codec.Close()
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSDelete,
|
||||
Op: api.KVDelete,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "/test/sub/key3",
|
||||
},
|
||||
|
@ -398,7 +399,7 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
keys := []string{
|
||||
"abe",
|
||||
|
@ -411,7 +412,7 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) {
|
|||
for _, key := range keys {
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: key,
|
||||
Flags: 1,
|
||||
|
@ -478,7 +479,7 @@ func TestKVSEndpoint_ListKeys(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
keys := []string{
|
||||
"/test/key1",
|
||||
|
@ -489,7 +490,7 @@ func TestKVSEndpoint_ListKeys(t *testing.T) {
|
|||
for _, key := range keys {
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: key,
|
||||
Flags: 1,
|
||||
|
@ -551,7 +552,7 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
keys := []string{
|
||||
"abe",
|
||||
|
@ -564,7 +565,7 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) {
|
|||
for _, key := range keys {
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: key,
|
||||
Flags: 1,
|
||||
|
@ -625,7 +626,7 @@ func TestKVS_Apply_LockDelay(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create and invalidate a session with a lock.
|
||||
state := s1.fsm.State()
|
||||
|
@ -661,7 +662,7 @@ func TestKVS_Apply_LockDelay(t *testing.T) {
|
|||
// Make a lock request.
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSLock,
|
||||
Op: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Session: validId,
|
||||
|
@ -694,13 +695,13 @@ func TestKVS_Issue_1626(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Set up the first key.
|
||||
{
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/test",
|
||||
Value: []byte("test"),
|
||||
|
@ -763,7 +764,7 @@ func TestKVS_Issue_1626(t *testing.T) {
|
|||
{
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/test2",
|
||||
Value: []byte("test"),
|
||||
|
@ -786,7 +787,7 @@ func TestKVS_Issue_1626(t *testing.T) {
|
|||
{
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/test",
|
||||
Value: []byte("updated"),
|
||||
|
|
|
@@ -9,6 +9,7 @@ import (
 "time"

 "github.com/armon/go-metrics"
+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/agent"
 "github.com/hashicorp/consul/consul/structs"
 "github.com/hashicorp/consul/types"
@@ -310,7 +311,7 @@ func (s *Server) reconcile() (err error) {
 // a "reap" event to cause the node to be cleaned up.
 func (s *Server) reconcileReaped(known map[string]struct{}) error {
 state := s.fsm.State()
-_, checks, err := state.ChecksInState(nil, structs.HealthAny)
+_, checks, err := state.ChecksInState(nil, api.HealthAny)
 if err != nil {
 return err
 }
@@ -455,7 +456,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
 return err
 }
 for _, check := range checks {
-if check.CheckID == SerfCheckID && check.Status == structs.HealthPassing {
+if check.CheckID == SerfCheckID && check.Status == api.HealthPassing {
 return nil
 }
 }
@@ -474,7 +475,7 @@ AFTER_CHECK:
 Node: member.Name,
 CheckID: SerfCheckID,
 Name: SerfCheckName,
-Status: structs.HealthPassing,
+Status: api.HealthPassing,
 Output: SerfCheckAliveOutput,
 },

@@ -502,7 +503,7 @@ func (s *Server) handleFailedMember(member serf.Member) error {
 return err
 }
 for _, check := range checks {
-if check.CheckID == SerfCheckID && check.Status == structs.HealthCritical {
+if check.CheckID == SerfCheckID && check.Status == api.HealthCritical {
 return nil
 }
 }
@@ -519,7 +520,7 @@ func (s *Server) handleFailedMember(member serf.Member) error {
 Node: member.Name,
 CheckID: SerfCheckID,
 Name: SerfCheckName,
-Status: structs.HealthCritical,
+Status: api.HealthCritical,
 Output: SerfCheckFailedOutput,
 },

@@ -7,8 +7,9 @@ import (
 "testing"
 "time"

+"github.com/hashicorp/consul/api"
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/net-rpc-msgpackrpc"
 "github.com/hashicorp/serf/serf"
 )
@ -34,11 +35,11 @@ func TestLeader_RegisterMember(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Client should be registered
|
||||
state := s1.fsm.State()
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -62,7 +63,7 @@ func TestLeader_RegisterMember(t *testing.T) {
|
|||
if checks[0].Name != SerfCheckName {
|
||||
t.Fatalf("bad check: %v", checks[0])
|
||||
}
|
||||
if checks[0].Status != structs.HealthPassing {
|
||||
if checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("bad check: %v", checks[0])
|
||||
}
|
||||
|
||||
|
@ -99,7 +100,7 @@ func TestLeader_FailedMember(t *testing.T) {
|
|||
defer os.RemoveAll(dir2)
|
||||
defer c1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to join
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -113,7 +114,7 @@ func TestLeader_FailedMember(t *testing.T) {
|
|||
|
||||
// Should be registered
|
||||
state := s1.fsm.State()
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -138,12 +139,12 @@ func TestLeader_FailedMember(t *testing.T) {
|
|||
t.Fatalf("bad check: %v", checks[0])
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, checks, err = state.NodeChecks(nil, c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
return checks[0].Status == structs.HealthCritical, errors.New(checks[0].Status)
|
||||
return checks[0].Status == api.HealthCritical, errors.New(checks[0].Status)
|
||||
}); err != nil {
|
||||
t.Fatalf("check status is %v, should be critical", err)
|
||||
}
|
||||
|
@ -173,7 +174,7 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
state := s1.fsm.State()
|
||||
|
||||
// Should be registered
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -188,7 +189,7 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
c1.Shutdown()
|
||||
|
||||
// Should be deregistered
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -223,7 +224,7 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
state := s1.fsm.State()
|
||||
|
||||
// Should be registered
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -273,7 +274,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Register a non-existing member
|
||||
dead := structs.RegisterRequest{
|
||||
|
@ -284,7 +285,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) {
|
|||
Node: "no-longer-around",
|
||||
CheckID: SerfCheckID,
|
||||
Name: SerfCheckName,
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{
|
||||
Token: "root",
|
||||
|
@ -343,7 +344,7 @@ func TestLeader_Reconcile(t *testing.T) {
|
|||
}
|
||||
|
||||
// Should be registered
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err = state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -359,7 +360,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
dir2, c1 := testClient(t)
|
||||
defer os.RemoveAll(dir2)
|
||||
|
@ -374,7 +375,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
|||
// Wait for the server to reconcile the client and register it.
|
||||
state := s1.fsm.State()
|
||||
var nodeAddr string
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -402,7 +403,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
|||
Node: c1.config.NodeName,
|
||||
CheckID: SerfCheckID,
|
||||
Name: SerfCheckName,
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
Output: "",
|
||||
},
|
||||
}
|
||||
|
@ -428,12 +429,12 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
|||
|
||||
// Fail the member and wait for the health to go critical.
|
||||
c1.Shutdown()
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
return checks[0].Status == structs.HealthCritical, errors.New(checks[0].Status)
|
||||
return checks[0].Status == api.HealthCritical, errors.New(checks[0].Status)
|
||||
}); err != nil {
|
||||
t.Fatalf("check status should be critical: %v", err)
|
||||
}
|
||||
|
@ -476,7 +477,7 @@ func TestLeader_LeftServer(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -484,7 +485,7 @@ func TestLeader_LeftServer(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
// Kill any server
|
||||
servers[0].Shutdown()
|
||||
|
||||
|
@ -529,7 +530,7 @@ func TestLeader_LeftLeader(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -558,7 +559,7 @@ func TestLeader_LeftLeader(t *testing.T) {
|
|||
continue
|
||||
}
|
||||
remain = s
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 2, errors.New(fmt.Sprintf("%d", peers))
|
||||
}); err != nil {
|
||||
|
@ -568,7 +569,7 @@ func TestLeader_LeftLeader(t *testing.T) {
|
|||
|
||||
// Verify the old leader is deregistered
|
||||
state := remain.fsm.State()
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
_, node, err := state.GetNode(leader.config.NodeName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -598,7 +599,7 @@ func TestLeader_MultiBootstrap(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers := s.serfLAN.Members()
|
||||
return len(peers) == 2, nil
|
||||
}); err != nil {
|
||||
|
@ -640,7 +641,7 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -670,7 +671,7 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
|
|||
|
||||
// Wait for a new leader
|
||||
leader = nil
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
for _, s := range servers {
|
||||
if s.IsLeader() {
|
||||
leader = s
|
||||
|
@ -683,7 +684,7 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check that the new leader has a pending GC expiration
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return leader.tombstoneGC.PendingExpiration(), nil
|
||||
}); err != nil {
|
||||
t.Fatal("should have pending expiration")
|
||||
|
@ -702,12 +703,12 @@ func TestLeader_ReapTombstones(t *testing.T) {
|
|||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a KV entry
|
||||
arg := structs.KVSRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Value: []byte("test"),
|
||||
|
@ -722,7 +723,7 @@ func TestLeader_ReapTombstones(t *testing.T) {
|
|||
}
|
||||
|
||||
// Delete the KV entry (tombstoned).
|
||||
arg.Op = structs.KVSDelete
|
||||
arg.Op = api.KVDelete
|
||||
if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -746,7 +747,7 @@ func TestLeader_ReapTombstones(t *testing.T) {
|
|||
|
||||
// Check that the new leader has a pending GC expiration by
|
||||
// watching for the tombstone to get removed.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
snap := state.Snapshot()
|
||||
defer snap.Close()
|
||||
stones, err := snap.Tombstones()
|
||||
|
@ -792,7 +793,7 @@ func TestLeader_RollRaftServer(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -804,7 +805,7 @@ func TestLeader_RollRaftServer(t *testing.T) {
|
|||
s2.Shutdown()
|
||||
|
||||
for _, s := range []*Server{s1, s3} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
minVer, err := ServerMinRaftProtocol(s.LANMembers())
|
||||
return minVer == 2, err
|
||||
}); err != nil {
|
||||
|
@ -827,7 +828,7 @@ func TestLeader_RollRaftServer(t *testing.T) {
|
|||
|
||||
// Make sure the dead server is removed and we're back to 3 total peers
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
addrs := 0
|
||||
ids := 0
|
||||
future := s.raft.GetConfiguration()
|
||||
|
@ -880,7 +881,7 @@ func TestLeader_ChangeServerID(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -891,7 +892,7 @@ func TestLeader_ChangeServerID(t *testing.T) {
|
|||
// Shut down a server, freeing up its address/port
|
||||
s3.Shutdown()
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
alive := 0
|
||||
for _, m := range s1.LANMembers() {
|
||||
if m.Status == serf.StatusAlive {
|
||||
|
@ -922,7 +923,7 @@ func TestLeader_ChangeServerID(t *testing.T) {
|
|||
|
||||
// Make sure the dead server is removed and we're back to 3 total peers
|
||||
for _, s := range servers {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/raft"
|
||||
)
|
||||
|
@ -22,7 +22,7 @@ func TestOperator_Autopilot_GetConfiguration(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -49,7 +49,7 @@ func TestOperator_Autopilot_GetConfiguration_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to get config without permissions
|
||||
arg := structs.DCSpecificRequest{
|
||||
|
@ -103,7 +103,7 @@ func TestOperator_Autopilot_SetConfiguration(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Change the autopilot config from the default
|
||||
arg := structs.AutopilotSetConfigRequest{
|
||||
|
@ -141,7 +141,7 @@ func TestOperator_Autopilot_SetConfiguration_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to set config without permissions
|
||||
arg := structs.AutopilotSetConfigRequest{
|
||||
|
@ -227,9 +227,9 @@ func TestOperator_ServerHealth(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
arg := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/raft"
|
||||
)
|
||||
|
@ -20,7 +20,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -66,7 +66,7 @@ func TestOperator_RaftGetConfiguration_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Make a request with no token to make sure it gets denied.
|
||||
arg := structs.DCSpecificRequest{
|
||||
|
@ -138,7 +138,7 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to remove a peer that's not there.
|
||||
arg := structs.RaftRemovePeerRequest{
|
||||
|
@ -200,7 +200,7 @@ func TestOperator_RaftRemovePeerByAddress_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Make a request with no token to make sure it gets denied.
|
||||
arg := structs.RaftRemovePeerRequest{
|
||||
|
@ -253,7 +253,7 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Try to remove a peer that's not there.
|
||||
arg := structs.RaftRemovePeerRequest{
|
||||
|
@ -316,7 +316,7 @@ func TestOperator_RaftRemovePeerByID_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Make a request with no token to make sure it gets denied.
|
||||
arg := structs.RaftRemovePeerRequest{
|
||||
|
|
|
@ -12,8 +12,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
)
|
||||
|
@ -25,7 +26,7 @@ func TestPreparedQuery_Apply(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Set up a bare bones query.
|
||||
query := structs.PreparedQueryRequest{
|
||||
|
@ -189,7 +190,7 @@ func TestPreparedQuery_Apply_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create an ACL with write permissions for redis queries.
|
||||
var token string
|
||||
|
@ -481,8 +482,8 @@ func TestPreparedQuery_Apply_ForwardLeader(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the follower as the client.
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -629,7 +630,7 @@ func TestPreparedQuery_ACLDeny_Catchall_Template(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create an ACL with write permissions for any prefix.
|
||||
var token string
|
||||
|
@ -842,7 +843,7 @@ func TestPreparedQuery_Get(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create an ACL with write permissions for redis queries.
|
||||
var token string
|
||||
|
@ -1093,7 +1094,7 @@ func TestPreparedQuery_List(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create an ACL with write permissions for redis queries.
|
||||
var token string
|
||||
|
@ -1299,7 +1300,7 @@ func TestPreparedQuery_Explain(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create an ACL with write permissions for prod- queries.
|
||||
var token string
|
||||
|
@ -1444,8 +1445,8 @@ func TestPreparedQuery_Execute(t *testing.T) {
|
|||
codec2 := rpcClient(t, s2)
|
||||
defer codec2.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Try to WAN join.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -1453,7 +1454,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
|
|||
if _, err := s2.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.WANMembers()) > 1, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed waiting for WAN join: %v", err)
|
||||
|
@ -1987,7 +1988,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
setHealth("node1", structs.HealthCritical)
|
||||
setHealth("node1", api.HealthCritical)
|
||||
|
||||
// The failing node should be filtered.
|
||||
{
|
||||
|
@ -2017,7 +2018,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
|
|||
}
|
||||
|
||||
// Upgrade it to a warning and re-query, should be 10 nodes again.
|
||||
setHealth("node1", structs.HealthWarning)
|
||||
setHealth("node1", api.HealthWarning)
|
||||
{
|
||||
req := structs.PreparedQueryExecuteRequest{
|
||||
Datacenter: "dc1",
|
||||
|
@ -2269,7 +2270,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
|
|||
|
||||
// Now fail everything in dc1 and we should get an empty list back.
|
||||
for i := 0; i < 10; i++ {
|
||||
setHealth(fmt.Sprintf("node%d", i+1), structs.HealthCritical)
|
||||
setHealth(fmt.Sprintf("node%d", i+1), api.HealthCritical)
|
||||
}
|
||||
{
|
||||
req := structs.PreparedQueryExecuteRequest{
|
||||
|
@ -2468,8 +2469,8 @@ func TestPreparedQuery_Execute_ForwardLeader(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Use the follower as the client.
|
||||
var codec rpc.ClientCodec
|
||||
|
@ -2693,8 +2694,8 @@ func TestPreparedQuery_Wrapper(t *testing.T) {
|
|||
codec2 := rpcClient(t, s2)
|
||||
defer codec2.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Try to WAN join.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -2702,7 +2703,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) {
|
|||
if _, err := s2.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.WANMembers()) > 1, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed waiting for WAN join: %v", err)
|
||||
|
|
|
@@ -8,7 +8,7 @@ import (

 "github.com/hashicorp/consul/consul/state"
 "github.com/hashicorp/consul/consul/structs"
-"github.com/hashicorp/consul/testutil"
+"github.com/hashicorp/consul/testrpc"
 "github.com/hashicorp/go-memdb"
 "github.com/hashicorp/net-rpc-msgpackrpc"
 )
@@ -37,7 +37,7 @@ func TestRPC_NoLeader_Fail(t *testing.T) {
 }

 // Now make sure it goes through.
-testutil.WaitForLeader(t, s1.RPC, "dc1")
+testrpc.WaitForLeader(t, s1.RPC, "dc1")
 err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)
 if err != nil {
 t.Fatalf("bad: %v", err)

@ -10,7 +10,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -137,7 +137,7 @@ func TestRTT_sortNodesByDistanceFrom(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, server.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
seedCoordinates(t, codec, server)
|
||||
nodes := structs.Nodes{
|
||||
|
@ -189,7 +189,7 @@ func TestRTT_sortNodesByDistanceFrom_Nodes(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, server.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
seedCoordinates(t, codec, server)
|
||||
nodes := structs.Nodes{
|
||||
|
@ -238,7 +238,7 @@ func TestRTT_sortNodesByDistanceFrom_ServiceNodes(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, server.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
seedCoordinates(t, codec, server)
|
||||
nodes := structs.ServiceNodes{
|
||||
|
@ -287,7 +287,7 @@ func TestRTT_sortNodesByDistanceFrom_HealthChecks(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, server.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
seedCoordinates(t, codec, server)
|
||||
checks := structs.HealthChecks{
|
||||
|
@ -336,7 +336,7 @@ func TestRTT_sortNodesByDistanceFrom_CheckServiceNodes(t *testing.T) {
|
|||
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
testutil.WaitForLeader(t, server.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
seedCoordinates(t, codec, server)
|
||||
nodes := structs.CheckServiceNodes{
|
||||
|
|
|
@ -11,7 +11,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
)
|
||||
|
@ -158,13 +158,13 @@ func TestServer_JoinLAN(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the members
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
|
@ -188,13 +188,13 @@ func TestServer_JoinWAN(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the members
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.WANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.WANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
|
@ -205,7 +205,7 @@ func TestServer_JoinWAN(t *testing.T) {
|
|||
t.Fatalf("remote consul missing")
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.router.GetDatacenters()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("remote consul missing")
|
||||
|
@ -229,7 +229,7 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, s := range []*Server{s1, s2} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s.WANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("bad len for server %d", i)
|
||||
|
@ -249,7 +249,7 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, s := range []*Server{s1, s2, s3} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s.WANMembers()) == 3, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("bad len for server %d", i)
|
||||
|
@ -293,28 +293,28 @@ func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the WAN members on s1
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.WANMembers()) == 3, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
// Check the WAN members on s2
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.WANMembers()) == 3, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
// Check the LAN members on s2
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
// Check the LAN members on s3
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s3.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
|
@ -376,14 +376,14 @@ func TestServer_LeaveLeader(t *testing.T) {
|
|||
var p1 int
|
||||
var p2 int
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 2 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
|
@ -402,7 +402,7 @@ func TestServer_LeaveLeader(t *testing.T) {
|
|||
|
||||
// Should lose a peer
|
||||
for _, s := range []*Server{s1, s2} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s.numPeers()
|
||||
return p1 == 1, nil
|
||||
}); err != nil {
|
||||
|
@ -431,14 +431,14 @@ func TestServer_Leave(t *testing.T) {
|
|||
var p1 int
|
||||
var p2 int
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 2 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 2, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
|
@ -457,7 +457,7 @@ func TestServer_Leave(t *testing.T) {
|
|||
|
||||
// Should lose a peer
|
||||
for _, s := range []*Server{s1, s2} {
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s.numPeers()
|
||||
return p1 == 1, nil
|
||||
}); err != nil {
|
||||
|
@ -509,27 +509,27 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check the members
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s2.LANMembers()) == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatal("bad len")
|
||||
}
|
||||
|
||||
// Verify Raft has established a peer
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s1.numPeers()
|
||||
return peers == 2, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("no peers")
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s2.numPeers()
|
||||
return peers == 2, nil
|
||||
}); err != nil {
|
||||
|
@ -568,14 +568,14 @@ func TestServer_Expect(t *testing.T) {
|
|||
var p2 int
|
||||
|
||||
// Should have no peers yet since the bootstrap didn't occur.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 0, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 0 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 0, errors.New(fmt.Sprintf("%d", p2))
|
||||
}); err != nil {
|
||||
|
@ -590,21 +590,21 @@ func TestServer_Expect(t *testing.T) {
|
|||
var p3 int
|
||||
|
||||
// Now we have three servers so we should bootstrap.
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 3, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 3 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 3, errors.New(fmt.Sprintf("%d", p2))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 3 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p3, _ = s3.numPeers()
|
||||
return p3 == 3, errors.New(fmt.Sprintf("%d", p3))
|
||||
}); err != nil {
|
||||
|
@ -613,7 +613,7 @@ func TestServer_Expect(t *testing.T) {
|
|||
|
||||
// Make sure a leader is elected, grab the current term and then add in
|
||||
// the fourth server.
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
termBefore := s1.raft.Stats()["last_log_term"]
|
||||
if _, err := s4.JoinLAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -621,7 +621,7 @@ func TestServer_Expect(t *testing.T) {
|
|||
|
||||
// Wait for the new server to see itself added to the cluster.
|
||||
var p4 int
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p4, _ = s4.numPeers()
|
||||
return p4 == 4, errors.New(fmt.Sprintf("%d", p4))
|
||||
}); err != nil {
|
||||
|
@ -630,7 +630,7 @@ func TestServer_Expect(t *testing.T) {
|
|||
|
||||
// Make sure there's still a leader and that the term didn't change,
|
||||
// so we know an election didn't occur.
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
termAfter := s1.raft.Stats()["last_log_term"]
|
||||
if termAfter != termBefore {
|
||||
t.Fatalf("looks like an election took place")
|
||||
|
@ -664,14 +664,14 @@ func TestServer_BadExpect(t *testing.T) {
|
|||
var p2 int
|
||||
|
||||
// should have no peers yet
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 0, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 0 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 0, errors.New(fmt.Sprintf("%d", p2))
|
||||
}); err != nil {
|
||||
|
@ -686,21 +686,21 @@ func TestServer_BadExpect(t *testing.T) {
|
|||
var p3 int
|
||||
|
||||
// should still have no peers (because s2 is in expect=2 mode)
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p1, _ = s1.numPeers()
|
||||
return p1 == 0, errors.New(fmt.Sprintf("%d", p1))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 0 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p2, _ = s2.numPeers()
|
||||
return p2 == 0, errors.New(fmt.Sprintf("%d", p2))
|
||||
}); err != nil {
|
||||
t.Fatalf("should have 0 peers %s", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
p3, _ = s3.numPeers()
|
||||
return p3 == 0, errors.New(fmt.Sprintf("%d", p3))
|
||||
}); err != nil {
|
||||
|
@ -723,7 +723,7 @@ func TestServer_globalRPCErrors(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.router.GetDatacenters()) == 1, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("did not join WAN")
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -19,7 +19,7 @@ func TestSession_Apply(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
|
@ -78,7 +78,7 @@ func TestSession_DeleteApply(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Just add a node
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
|
@ -146,7 +146,7 @@ func TestSession_Apply_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL.
|
||||
req := structs.ACLRequest{
|
||||
|
@ -235,7 +235,7 @@ func TestSession_Get(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
arg := structs.SessionRequest{
|
||||
|
@ -278,7 +278,7 @@ func TestSession_List(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
ids := []string{}
|
||||
|
@ -334,7 +334,7 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL.
|
||||
req := structs.ACLRequest{
|
||||
|
@ -497,7 +497,7 @@ func TestSession_ApplyTimers(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
arg := structs.SessionRequest{
|
||||
|
@ -538,7 +538,7 @@ func TestSession_Renew(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
TTL := "10s" // the minimum allowed ttl
|
||||
ttl := 10 * time.Second
|
||||
|
||||
|
@ -703,7 +703,7 @@ func TestSession_Renew_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create the ACL.
|
||||
req := structs.ACLRequest{
|
||||
|
@ -776,7 +776,7 @@ func TestSession_NodeSessions(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
|
||||
s1.fsm.State().EnsureNode(1, &structs.Node{Node: "bar", Address: "127.0.0.1"})
|
||||
|
@ -834,7 +834,7 @@ func TestSession_Apply_BadTTL(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
arg := structs.SessionRequest{
|
||||
Datacenter: "dc1",
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -17,7 +17,7 @@ func TestInitializeSessionTimers(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
state := s1.fsm.State()
|
||||
if err := state.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
|
||||
|
@ -50,7 +50,7 @@ func TestResetSessionTimer_Fault(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Should not exist
|
||||
err := s1.resetSessionTimer(generateUUID(), nil)
|
||||
|
@ -90,7 +90,7 @@ func TestResetSessionTimer_NoTTL(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a session
|
||||
state := s1.fsm.State()
|
||||
|
@ -143,7 +143,7 @@ func TestResetSessionTimerLocked(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.sessionTimersLock.Lock()
|
||||
s1.resetSessionTimerLocked("foo", 5*time.Millisecond)
|
||||
|
@ -165,7 +165,7 @@ func TestResetSessionTimerLocked_Renew(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
s1.sessionTimersLock.Lock()
|
||||
s1.resetSessionTimerLocked("foo", 5*time.Millisecond)
|
||||
|
@ -205,7 +205,7 @@ func TestInvalidateSession(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a session
|
||||
state := s1.fsm.State()
|
||||
|
@ -298,7 +298,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
peers, _ := s1.numPeers()
|
||||
return peers == 3, nil
|
||||
}); err != nil {
|
||||
|
@ -363,7 +363,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
|
|||
}
|
||||
|
||||
// Find the new leader
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
leader = nil
|
||||
for _, s := range servers {
|
||||
if s.IsLeader() {
|
||||
|
|
|
@ -7,8 +7,9 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -21,7 +22,7 @@ func verifySnapshot(t *testing.T, s *Server, dc, token string) {
|
|||
{
|
||||
args := structs.KVSRequest{
|
||||
Datacenter: dc,
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Value: []byte("hello"),
|
||||
|
@ -76,7 +77,7 @@ func verifySnapshot(t *testing.T, s *Server, dc, token string) {
|
|||
{
|
||||
args := structs.KVSRequest{
|
||||
Datacenter: dc,
|
||||
Op: structs.KVSSet,
|
||||
Op: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Value: []byte("goodbye"),
|
||||
|
@ -150,7 +151,7 @@ func TestSnapshot(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
verifySnapshot(t, s1, "dc1", "")
|
||||
}
|
||||
|
||||
|
@ -159,7 +160,7 @@ func TestSnapshot_LeaderState(t *testing.T) {
|
|||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
@ -247,7 +248,7 @@ func TestSnapshot_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Take a snapshot.
|
||||
func() {
|
||||
|
@ -301,8 +302,8 @@ func TestSnapshot_Forward_Leader(t *testing.T) {
|
|||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc1")
|
||||
|
||||
// Run against the leader and the follower to ensure we forward.
|
||||
for _, s := range []*Server{s1, s2} {
|
||||
|
@ -320,8 +321,8 @@ func TestSnapshot_Forward_Datacenter(t *testing.T) {
|
|||
defer os.RemoveAll(dir2)
|
||||
defer s2.Shutdown()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testutil.WaitForLeader(t, s2.RPC, "dc2")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Try to WAN join.
|
||||
addr := fmt.Sprintf("127.0.0.1:%d",
|
||||
|
@ -329,7 +330,7 @@ func TestSnapshot_Forward_Datacenter(t *testing.T) {
|
|||
if _, err := s2.JoinWAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := testutil.WaitForResult(func() (bool, error) {
|
||||
if err := testrpc.WaitForResult(func() (bool, error) {
|
||||
return len(s1.WANMembers()) > 1, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to join WAN: %s", err)
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/types"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
@ -885,7 +886,7 @@ func (s *StateStore) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.Healt
|
|||
|
||||
// Use the default check status if none was provided
|
||||
if hc.Status == "" {
|
||||
hc.Status = structs.HealthCritical
|
||||
hc.Status = api.HealthCritical
|
||||
}
|
||||
|
||||
// Get the node
|
||||
|
@ -913,7 +914,7 @@ func (s *StateStore) ensureCheckTxn(tx *memdb.Txn, idx uint64, hc *structs.Healt
|
|||
}
|
||||
|
||||
// Delete any sessions for this check if the health is critical.
|
||||
if hc.Status == structs.HealthCritical {
|
||||
if hc.Status == api.HealthCritical {
|
||||
mappings, err := tx.Get("session_checks", "node_check", hc.Node, string(hc.CheckID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed session checks lookup: %s", err)
|
||||
|
@ -1047,7 +1048,7 @@ func (s *StateStore) ChecksInState(ws memdb.WatchSet, state string) (uint64, str
|
|||
// Query all checks if HealthAny is passed, otherwise use the index.
|
||||
var iter memdb.ResultIterator
|
||||
var err error
|
||||
if state == structs.HealthAny {
|
||||
if state == api.HealthAny {
|
||||
iter, err = tx.Get("checks", "status")
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
|
@ -1079,7 +1080,7 @@ func (s *StateStore) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, fi
|
|||
// Query all checks if HealthAny is passed, otherwise use the index.
|
||||
var iter memdb.ResultIterator
|
||||
var err error
|
||||
if state == structs.HealthAny {
|
||||
if state == api.HealthAny {
|
||||
iter, err = tx.Get("checks", "status")
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
|
|
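The catalog changes above only swap where the status constants come from; query behavior is unchanged. A rough sketch of exercising ChecksInState with the api constants, assuming the same testStateStore/testRegisterNode/testRegisterCheck helpers the tests below already use (the node and check names are made up):

package state

import (
	"testing"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-memdb"
)

// Sketch only, not part of this patch.
func TestSketch_ChecksInState_AnyVsCritical(t *testing.T) {
	s := testStateStore(t)
	testRegisterNode(t, s, 1, "node1")
	testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing)
	testRegisterCheck(t, s, 3, "node1", "", "check2", api.HealthCritical)

	// api.HealthAny is the wildcard state, so both checks come back.
	if _, all, err := s.ChecksInState(memdb.NewWatchSet(), api.HealthAny); err != nil || len(all) != 2 {
		t.Fatalf("expected 2 checks, got %d (err: %v)", len(all), err)
	}

	// A concrete state filters on the status index instead.
	if _, crit, err := s.ChecksInState(memdb.NewWatchSet(), api.HealthCritical); err != nil || len(crit) != 1 {
		t.Fatalf("expected 1 critical check, got %d (err: %v)", len(crit), err)
	}
}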
|
@ -7,6 +7,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/types"
|
||||
|
@ -734,7 +735,7 @@ func TestStateStore_DeleteNode(t *testing.T) {
|
|||
// Create a node and register a service and health check with it.
|
||||
testRegisterNode(t, s, 0, "node1")
|
||||
testRegisterService(t, s, 1, "node1", "service1")
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing)
|
||||
|
||||
// Delete the node
|
||||
if err := s.DeleteNode(3, "node1"); err != nil {
|
||||
|
@ -1473,7 +1474,7 @@ func TestStateStore_DeleteService(t *testing.T) {
|
|||
// Register a node with one service and a check.
|
||||
testRegisterNode(t, s, 1, "node1")
|
||||
testRegisterService(t, s, 2, "node1", "service1")
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check1", api.HealthPassing)
|
||||
|
||||
// Delete the service.
|
||||
ws := memdb.NewWatchSet()
|
||||
|
@ -1593,7 +1594,7 @@ func TestStateStore_EnsureCheck(t *testing.T) {
|
|||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "redis check",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
Notes: "test check",
|
||||
Output: "aaa",
|
||||
ServiceID: "service1",
|
||||
|
@ -1689,7 +1690,7 @@ func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check that the status was set to the proper default
|
||||
if len(result) != 1 || result[0].Status != structs.HealthCritical {
|
||||
if len(result) != 1 || result[0].Status != api.HealthCritical {
|
||||
t.Fatalf("bad: %#v", result)
|
||||
}
|
||||
}
|
||||
|
@ -1713,11 +1714,11 @@ func TestStateStore_NodeChecks(t *testing.T) {
|
|||
// Create some nodes and checks.
|
||||
testRegisterNode(t, s, 0, "node1")
|
||||
testRegisterService(t, s, 1, "node1", "service1")
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", api.HealthPassing)
|
||||
testRegisterNode(t, s, 4, "node2")
|
||||
testRegisterService(t, s, 5, "node2", "service2")
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check3", api.HealthPassing)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1737,7 +1738,7 @@ func TestStateStore_NodeChecks(t *testing.T) {
|
|||
|
||||
// Creating some unrelated node should not fire the watch.
|
||||
testRegisterNode(t, s, 7, "node3")
|
||||
testRegisterCheck(t, s, 8, "node3", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 8, "node3", "", "check1", api.HealthPassing)
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1756,7 +1757,7 @@ func TestStateStore_NodeChecks(t *testing.T) {
|
|||
}
|
||||
|
||||
// Changing node2 should fire the watch.
|
||||
testRegisterCheck(t, s, 9, "node2", "service2", "check3", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 9, "node2", "service2", "check3", api.HealthCritical)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1781,11 +1782,11 @@ func TestStateStore_ServiceChecks(t *testing.T) {
|
|||
// Create some nodes and checks.
|
||||
testRegisterNode(t, s, 0, "node1")
|
||||
testRegisterService(t, s, 1, "node1", "service1")
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", api.HealthPassing)
|
||||
testRegisterNode(t, s, 4, "node2")
|
||||
testRegisterService(t, s, 5, "node2", "service2")
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check3", api.HealthPassing)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1805,13 +1806,13 @@ func TestStateStore_ServiceChecks(t *testing.T) {
|
|||
|
||||
// Adding some unrelated service + check should not fire the watch.
|
||||
testRegisterService(t, s, 7, "node1", "service3")
|
||||
testRegisterCheck(t, s, 8, "node1", "service3", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 8, "node1", "service3", "check3", api.HealthPassing)
|
||||
if watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// Updating a related check should fire the watch.
|
||||
testRegisterCheck(t, s, 9, "node1", "service1", "check2", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 9, "node1", "service1", "check2", api.HealthCritical)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1836,11 +1837,11 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) {
|
|||
// Create some nodes and checks.
|
||||
testRegisterNodeWithMeta(t, s, 0, "node1", map[string]string{"somekey": "somevalue", "common": "1"})
|
||||
testRegisterService(t, s, 1, "node1", "service1")
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "service1", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node1", "service1", "check2", api.HealthPassing)
|
||||
testRegisterNodeWithMeta(t, s, 4, "node2", map[string]string{"common": "1"})
|
||||
testRegisterService(t, s, 5, "node2", "service1")
|
||||
testRegisterCheck(t, s, 6, "node2", "service1", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node2", "service1", "check3", api.HealthPassing)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1903,7 +1904,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) {
|
|||
idx++
|
||||
testRegisterService(t, s, idx, node, "service1")
|
||||
idx++
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check1", api.HealthPassing)
|
||||
idx++
|
||||
}
|
||||
|
||||
|
@ -1928,23 +1929,23 @@ func TestStateStore_ChecksInState(t *testing.T) {
|
|||
|
||||
// Querying with no results returns nil
|
||||
ws := memdb.NewWatchSet()
|
||||
idx, res, err := s.ChecksInState(ws, structs.HealthPassing)
|
||||
idx, res, err := s.ChecksInState(ws, api.HealthPassing)
|
||||
if idx != 0 || res != nil || err != nil {
|
||||
t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err)
|
||||
}
|
||||
|
||||
// Register a node with checks in varied states
|
||||
testRegisterNode(t, s, 0, "node1")
|
||||
testRegisterCheck(t, s, 1, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check2", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 3, "node1", "", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 1, "node1", "", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check2", api.HealthCritical)
|
||||
testRegisterCheck(t, s, 3, "node1", "", "check3", api.HealthPassing)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// Query the state store for passing checks.
|
||||
ws = memdb.NewWatchSet()
|
||||
_, checks, err := s.ChecksInState(ws, structs.HealthPassing)
|
||||
_, checks, err := s.ChecksInState(ws, api.HealthPassing)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -1961,14 +1962,14 @@ func TestStateStore_ChecksInState(t *testing.T) {
|
|||
}
|
||||
|
||||
// Changing the state of a check should fire the watch.
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", api.HealthCritical)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
// HealthAny just returns everything.
|
||||
ws = memdb.NewWatchSet()
|
||||
_, checks, err = s.ChecksInState(ws, structs.HealthAny)
|
||||
_, checks, err = s.ChecksInState(ws, api.HealthAny)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -1980,7 +1981,7 @@ func TestStateStore_ChecksInState(t *testing.T) {
|
|||
}
|
||||
|
||||
// Adding a new check should fire the watch.
|
||||
testRegisterCheck(t, s, 5, "node1", "", "check4", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 5, "node1", "", "check4", api.HealthCritical)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -1991,17 +1992,17 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) {
|
|||
|
||||
// Querying with no results returns nil.
|
||||
ws := memdb.NewWatchSet()
|
||||
idx, res, err := s.ChecksInStateByNodeMeta(ws, structs.HealthPassing, nil)
|
||||
idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil)
|
||||
if idx != 0 || res != nil || err != nil {
|
||||
t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err)
|
||||
}
|
||||
|
||||
// Register a node with checks in varied states.
|
||||
testRegisterNodeWithMeta(t, s, 0, "node1", map[string]string{"somekey": "somevalue", "common": "1"})
|
||||
testRegisterCheck(t, s, 1, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check2", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 1, "node1", "", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check2", api.HealthCritical)
|
||||
testRegisterNodeWithMeta(t, s, 3, "node2", map[string]string{"common": "1"})
|
||||
testRegisterCheck(t, s, 4, "node2", "", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 4, "node2", "", "check3", api.HealthPassing)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -2014,25 +2015,25 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) {
|
|||
// Basic meta filter, any status
|
||||
{
|
||||
filters: map[string]string{"somekey": "somevalue"},
|
||||
state: structs.HealthAny,
|
||||
state: api.HealthAny,
|
||||
checks: []string{"check2", "check1"},
|
||||
},
|
||||
// Basic meta filter, only passing
|
||||
{
|
||||
filters: map[string]string{"somekey": "somevalue"},
|
||||
state: structs.HealthPassing,
|
||||
state: api.HealthPassing,
|
||||
checks: []string{"check1"},
|
||||
},
|
||||
// Common meta filter, any status
|
||||
{
|
||||
filters: map[string]string{"common": "1"},
|
||||
state: structs.HealthAny,
|
||||
state: api.HealthAny,
|
||||
checks: []string{"check2", "check1", "check3"},
|
||||
},
|
||||
// Common meta filter, only passing
|
||||
{
|
||||
filters: map[string]string{"common": "1"},
|
||||
state: structs.HealthPassing,
|
||||
state: api.HealthPassing,
|
||||
checks: []string{"check1", "check3"},
|
||||
},
|
||||
// Invalid meta filter
|
||||
|
@ -2043,13 +2044,13 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) {
|
|||
// Multiple filters, any status
|
||||
{
|
||||
filters: map[string]string{"somekey": "somevalue", "common": "1"},
|
||||
state: structs.HealthAny,
|
||||
state: api.HealthAny,
|
||||
checks: []string{"check2", "check1"},
|
||||
},
|
||||
// Multiple filters, only passing
|
||||
{
|
||||
filters: map[string]string{"somekey": "somevalue", "common": "1"},
|
||||
state: structs.HealthPassing,
|
||||
state: api.HealthPassing,
|
||||
checks: []string{"check1"},
|
||||
},
|
||||
}
|
||||
|
@ -2086,14 +2087,14 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) {
|
|||
idx++
|
||||
testRegisterService(t, s, idx, node, "service1")
|
||||
idx++
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check1", api.HealthPassing)
|
||||
idx++
|
||||
}
|
||||
|
||||
// Now get a fresh watch, which will be forced to watch the whole
|
||||
// node table.
|
||||
ws = memdb.NewWatchSet()
|
||||
_, _, err = s.ChecksInStateByNodeMeta(ws, structs.HealthPassing,
|
||||
_, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing,
|
||||
map[string]string{"common": "1"})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
@ -2111,7 +2112,7 @@ func TestStateStore_DeleteCheck(t *testing.T) {
|
|||
|
||||
// Register a node and a node-level health check.
|
||||
testRegisterNode(t, s, 1, "node1")
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing)
|
||||
|
||||
// Make sure the check is there.
|
||||
ws := memdb.NewWatchSet()
|
||||
|
@ -2174,16 +2175,16 @@ func TestStateStore_CheckServiceNodes(t *testing.T) {
|
|||
testRegisterNode(t, s, 1, "node2")
|
||||
|
||||
// Register node-level checks. These should be the final result.
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node2", "", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 3, "node2", "", "check2", api.HealthPassing)
|
||||
|
||||
// Register a service against the nodes.
|
||||
testRegisterService(t, s, 4, "node1", "service1")
|
||||
testRegisterService(t, s, 5, "node2", "service2")
|
||||
|
||||
// Register checks against the services.
|
||||
testRegisterCheck(t, s, 6, "node1", "service1", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 7, "node2", "service2", "check4", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node1", "service1", "check3", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 7, "node2", "service2", "check4", api.HealthPassing)
|
||||
|
||||
// At this point all the changes should have fired the watch.
|
||||
if !watchFired(ws) {
|
||||
|
@ -2241,7 +2242,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check updates alter the returned index and fire the watch.
|
||||
testRegisterCheck(t, s, 10, "node1", "service1", "check1", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 10, "node1", "service1", "check1", api.HealthCritical)
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
@ -2267,11 +2268,11 @@ func TestStateStore_CheckServiceNodes(t *testing.T) {
|
|||
node := fmt.Sprintf("many%d", i)
|
||||
testRegisterNode(t, s, idx, node)
|
||||
idx++
|
||||
testRegisterCheck(t, s, idx, node, "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, idx, node, "", "check1", api.HealthPassing)
|
||||
idx++
|
||||
testRegisterService(t, s, idx, node, "service1")
|
||||
idx++
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, idx, node, "service1", "check2", api.HealthPassing)
|
||||
idx++
|
||||
}
|
||||
|
||||
|
@ -2293,7 +2294,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
testRegisterCheck(t, s, idx, "more-nope", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, idx, "more-nope", "", "check1", api.HealthPassing)
|
||||
idx++
|
||||
if !watchFired(ws) {
|
||||
t.Fatalf("bad")
|
||||
|
@ -2316,7 +2317,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) {
|
|||
Node: "foo",
|
||||
CheckID: "db",
|
||||
Name: "can connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db1",
|
||||
}
|
||||
if err := s.EnsureCheck(3, check); err != nil {
|
||||
|
@ -2326,7 +2327,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) {
|
|||
Node: "foo",
|
||||
CheckID: "check1",
|
||||
Name: "check1",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
}
|
||||
if err := s.EnsureCheck(4, check); err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
|
@ -2351,7 +2352,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "db",
|
||||
Name: "can connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db1",
|
||||
}
|
||||
if err := s.EnsureCheck(3, check); err != nil {
|
||||
|
@ -2361,7 +2362,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "check1",
|
||||
Name: "another check",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
}
|
||||
if err := s.EnsureCheck(4, check); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -2414,13 +2415,13 @@ func TestStateStore_Check_Snapshot(t *testing.T) {
|
|||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "node check",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
&structs.HealthCheck{
|
||||
Node: "node1",
|
||||
CheckID: "check2",
|
||||
Name: "service check",
|
||||
Status: structs.HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
ServiceID: "service1",
|
||||
},
|
||||
}
|
||||
|
@ -2434,14 +2435,14 @@ func TestStateStore_Check_Snapshot(t *testing.T) {
|
|||
// will affect the index but not the dump.
|
||||
testRegisterNode(t, s, 3, "node2")
|
||||
testRegisterService(t, s, 4, "node2", "service2")
|
||||
testRegisterCheck(t, s, 5, "node2", "service2", "check3", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 5, "node2", "service2", "check3", api.HealthPassing)
|
||||
|
||||
// Snapshot the checks.
|
||||
snap := s.Snapshot()
|
||||
defer snap.Close()
|
||||
|
||||
// Alter the real state store.
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check4", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node2", "service2", "check4", api.HealthPassing)
|
||||
|
||||
// Verify the snapshot.
|
||||
if idx := snap.LastIndex(); idx != 5 {
|
||||
|
@ -2493,12 +2494,12 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) {
|
|||
testRegisterService(t, s, 5, "node2", "service2")
|
||||
|
||||
// Register service-level checks
|
||||
testRegisterCheck(t, s, 6, "node1", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 7, "node2", "service1", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 6, "node1", "service1", "check1", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 7, "node2", "service1", "check1", api.HealthPassing)
|
||||
|
||||
// Register node-level checks
|
||||
testRegisterCheck(t, s, 8, "node1", "", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 9, "node2", "", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 8, "node1", "", "check2", api.HealthPassing)
|
||||
testRegisterCheck(t, s, 9, "node2", "", "check2", api.HealthPassing)
|
||||
|
||||
// Both watches should have fired due to the changes above.
|
||||
if !watchFired(wsInfo) {
|
||||
|
@ -2518,7 +2519,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) {
|
|||
CheckID: "check1",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "service1",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 6,
|
||||
ModifyIndex: 6,
|
||||
|
@ -2529,7 +2530,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) {
|
|||
CheckID: "check2",
|
||||
ServiceID: "",
|
||||
ServiceName: "",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 8,
|
||||
ModifyIndex: 8,
|
||||
|
@ -2567,7 +2568,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) {
|
|||
CheckID: "check1",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "service1",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 7,
|
||||
ModifyIndex: 7,
|
||||
|
@ -2578,7 +2579,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) {
|
|||
CheckID: "check2",
|
||||
ServiceID: "",
|
||||
ServiceName: "",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
RaftIndex: structs.RaftIndex{
|
||||
CreateIndex: 9,
|
||||
ModifyIndex: 9,
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
@ -112,7 +113,7 @@ func (s *StateStore) sessionCreateTxn(tx *memdb.Txn, idx uint64, sess *structs.S
|
|||
|
||||
// Check that the check is not in critical state
|
||||
status := check.(*structs.HealthCheck).Status
|
||||
if status == structs.HealthCritical {
|
||||
if status == api.HealthCritical {
|
||||
return fmt.Errorf("Check '%s' is in %s state", checkID, status)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/types"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
@ -108,9 +109,9 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
|
|||
}
|
||||
|
||||
// Registering with a critical check is disallowed
|
||||
testRegisterCheck(t, s, 3, "node1", "", "check1", structs.HealthCritical)
|
||||
testRegisterCheck(t, s, 3, "node1", "", "check1", api.HealthCritical)
|
||||
err = s.SessionCreate(4, sess)
|
||||
if err == nil || !strings.Contains(err.Error(), structs.HealthCritical) {
|
||||
if err == nil || !strings.Contains(err.Error(), api.HealthCritical) {
|
||||
t.Fatalf("expected critical state error, got: %#v", err)
|
||||
}
|
||||
if watchFired(ws) {
|
||||
|
@ -119,7 +120,7 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
|
|||
|
||||
// Registering with a healthy check succeeds (doesn't hit the watch since
|
||||
// we are looking at the old session).
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", api.HealthPassing)
|
||||
if err := s.SessionCreate(5, sess); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -128,7 +129,7 @@ func TestStateStore_SessionCreate_SessionGet(t *testing.T) {
|
|||
}
|
||||
|
||||
// Register a session against two checks.
|
||||
testRegisterCheck(t, s, 5, "node1", "", "check2", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 5, "node1", "", "check2", api.HealthPassing)
|
||||
sess2 := &structs.Session{
|
||||
ID: testUUID(),
|
||||
Node: "node1",
|
||||
|
@ -376,7 +377,7 @@ func TestStateStore_Session_Snapshot_Restore(t *testing.T) {
|
|||
testRegisterNode(t, s, 1, "node1")
|
||||
testRegisterNode(t, s, 2, "node2")
|
||||
testRegisterNode(t, s, 3, "node3")
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", structs.HealthPassing)
|
||||
testRegisterCheck(t, s, 4, "node1", "", "check1", api.HealthPassing)
|
||||
|
||||
// Create some sessions in the state store.
|
||||
session1 := testUUID()
|
||||
|
@ -559,7 +560,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) {
|
|||
Node: "foo",
|
||||
CheckID: "api",
|
||||
Name: "Can connect",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "api",
|
||||
}
|
||||
if err := s.EnsureCheck(13, check); err != nil {
|
||||
|
@ -610,7 +611,7 @@ func TestStateStore_Session_Invalidate_Critical_Check(t *testing.T) {
|
|||
check := &structs.HealthCheck{
|
||||
Node: "foo",
|
||||
CheckID: "bar",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
}
|
||||
if err := s.EnsureCheck(13, check); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -630,7 +631,7 @@ func TestStateStore_Session_Invalidate_Critical_Check(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
check.Status = structs.HealthCritical
|
||||
check.Status = api.HealthCritical
|
||||
if err := s.EnsureCheck(15, check); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -661,7 +662,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) {
|
|||
check := &structs.HealthCheck{
|
||||
Node: "foo",
|
||||
CheckID: "bar",
|
||||
Status: structs.HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
}
|
||||
if err := s.EnsureCheck(13, check); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -3,6 +3,7 @@ package state
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
@ -13,24 +14,24 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
var err error
|
||||
|
||||
switch op.Verb {
|
||||
case structs.KVSSet:
|
||||
case api.KVSet:
|
||||
entry = &op.DirEnt
|
||||
err = s.kvsSetTxn(tx, idx, entry, false)
|
||||
|
||||
case structs.KVSDelete:
|
||||
case api.KVDelete:
|
||||
err = s.kvsDeleteTxn(tx, idx, op.DirEnt.Key)
|
||||
|
||||
case structs.KVSDeleteCAS:
|
||||
case api.KVDeleteCAS:
|
||||
var ok bool
|
||||
ok, err = s.kvsDeleteCASTxn(tx, idx, op.DirEnt.ModifyIndex, op.DirEnt.Key)
|
||||
if !ok && err == nil {
|
||||
err = fmt.Errorf("failed to delete key %q, index is stale", op.DirEnt.Key)
|
||||
}
|
||||
|
||||
case structs.KVSDeleteTree:
|
||||
case api.KVDeleteTree:
|
||||
err = s.kvsDeleteTreeTxn(tx, idx, op.DirEnt.Key)
|
||||
|
||||
case structs.KVSCAS:
|
||||
case api.KVCAS:
|
||||
var ok bool
|
||||
entry = &op.DirEnt
|
||||
ok, err = s.kvsSetCASTxn(tx, idx, entry)
|
||||
|
@ -38,7 +39,7 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
err = fmt.Errorf("failed to set key %q, index is stale", op.DirEnt.Key)
|
||||
}
|
||||
|
||||
case structs.KVSLock:
|
||||
case api.KVLock:
|
||||
var ok bool
|
||||
entry = &op.DirEnt
|
||||
ok, err = s.kvsLockTxn(tx, idx, entry)
|
||||
|
@ -46,7 +47,7 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
err = fmt.Errorf("failed to lock key %q, lock is already held", op.DirEnt.Key)
|
||||
}
|
||||
|
||||
case structs.KVSUnlock:
|
||||
case api.KVUnlock:
|
||||
var ok bool
|
||||
entry = &op.DirEnt
|
||||
ok, err = s.kvsUnlockTxn(tx, idx, entry)
|
||||
|
@ -54,13 +55,13 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
err = fmt.Errorf("failed to unlock key %q, lock isn't held, or is held by another session", op.DirEnt.Key)
|
||||
}
|
||||
|
||||
case structs.KVSGet:
|
||||
case api.KVGet:
|
||||
_, entry, err = s.kvsGetTxn(tx, nil, op.DirEnt.Key)
|
||||
if entry == nil && err == nil {
|
||||
err = fmt.Errorf("key %q doesn't exist", op.DirEnt.Key)
|
||||
}
|
||||
|
||||
case structs.KVSGetTree:
|
||||
case api.KVGetTree:
|
||||
var entries structs.DirEntries
|
||||
_, entries, err = s.kvsListTxn(tx, nil, op.DirEnt.Key)
|
||||
if err == nil {
|
||||
|
@ -72,10 +73,10 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
return results, nil
|
||||
}
|
||||
|
||||
case structs.KVSCheckSession:
|
||||
case api.KVCheckSession:
|
||||
entry, err = s.kvsCheckSessionTxn(tx, op.DirEnt.Key, op.DirEnt.Session)
|
||||
|
||||
case structs.KVSCheckIndex:
|
||||
case api.KVCheckIndex:
|
||||
entry, err = s.kvsCheckIndexTxn(tx, op.DirEnt.Key, op.DirEnt.ModifyIndex)
|
||||
|
||||
default:
|
||||
|
@ -89,7 +90,7 @@ func (s *StateStore) txnKVS(tx *memdb.Txn, idx uint64, op *structs.TxnKVOp) (str
|
|||
// value (we have to clone so we don't modify the entry being used by
|
||||
// the state store).
|
||||
if entry != nil {
|
||||
if op.Verb == structs.KVSGet {
|
||||
if op.Verb == api.KVGet {
|
||||
result := structs.TxnResult{KV: entry}
|
||||
return structs.TxnResults{&result}, nil
|
||||
}
|
||||
|
|
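For readers tracking the rename in the dispatcher above: every structs.KVS* verb maps one-to-one onto an api.KVOp constant, dropping the structs prefix and the extra "S". A quick-reference sketch, not part of the patch:

package state

import "github.com/hashicorp/consul/api"

// Old structs verb name -> new api.KVOp constant consumed by txnKVS above.
var kvVerbRename = map[string]api.KVOp{
	"KVSSet":          api.KVSet,
	"KVSDelete":       api.KVDelete,
	"KVSDeleteCAS":    api.KVDeleteCAS,
	"KVSDeleteTree":   api.KVDeleteTree,
	"KVSCAS":          api.KVCAS,
	"KVSLock":         api.KVLock,
	"KVSUnlock":       api.KVUnlock,
	"KVSGet":          api.KVGet,
	"KVSGetTree":      api.KVGetTree,
	"KVSCheckSession": api.KVCheckSession,
	"KVSCheckIndex":   api.KVCheckIndex,
}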
|
@ -5,6 +5,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
)
|
||||
|
||||
|
@ -29,7 +30,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
ops := structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGetTree,
|
||||
Verb: api.KVGetTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar",
|
||||
},
|
||||
|
@ -37,7 +38,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSSet,
|
||||
Verb: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/new",
|
||||
Value: []byte("one"),
|
||||
|
@ -46,7 +47,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDelete,
|
||||
Verb: api.KVDelete,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/zorp",
|
||||
},
|
||||
|
@ -54,7 +55,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDeleteCAS,
|
||||
Verb: api.KVDeleteCAS,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/delete",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -65,7 +66,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDeleteTree,
|
||||
Verb: api.KVDeleteTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar",
|
||||
},
|
||||
|
@ -73,7 +74,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
},
|
||||
|
@ -81,7 +82,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -92,7 +93,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCAS,
|
||||
Verb: api.KVCAS,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
Value: []byte("new"),
|
||||
|
@ -104,7 +105,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
},
|
||||
|
@ -112,7 +113,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -123,7 +124,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSLock,
|
||||
Verb: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: session,
|
||||
|
@ -132,7 +133,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: session,
|
||||
|
@ -141,7 +142,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSUnlock,
|
||||
Verb: api.KVUnlock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: session,
|
||||
|
@ -150,7 +151,7 @@ func TestStateStore_Txn_KVS(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: "",
|
||||
|
@ -416,7 +417,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
ops := structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCAS,
|
||||
Verb: api.KVCAS,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/update",
|
||||
Value: []byte("new"),
|
||||
|
@ -428,7 +429,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSLock,
|
||||
Verb: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: bogus,
|
||||
|
@ -437,7 +438,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSUnlock,
|
||||
Verb: api.KVUnlock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: bogus,
|
||||
|
@ -446,7 +447,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
Session: bogus,
|
||||
|
@ -455,7 +456,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -463,7 +464,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
Session: bogus,
|
||||
|
@ -472,7 +473,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/lock",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -483,7 +484,7 @@ func TestStateStore_Txn_KVS_Rollback(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -547,7 +548,7 @@ func TestStateStore_Txn_KVS_RO(t *testing.T) {
|
|||
ops := structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGetTree,
|
||||
Verb: api.KVGetTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar",
|
||||
},
|
||||
|
@ -555,7 +556,7 @@ func TestStateStore_Txn_KVS_RO(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo",
|
||||
},
|
||||
|
@ -563,7 +564,7 @@ func TestStateStore_Txn_KVS_RO(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar/baz",
|
||||
Session: "",
|
||||
|
@ -572,7 +573,7 @@ func TestStateStore_Txn_KVS_RO(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar/zip",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -660,7 +661,7 @@ func TestStateStore_Txn_KVS_RO_Safety(t *testing.T) {
|
|||
ops := structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSSet,
|
||||
Verb: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo",
|
||||
Value: []byte("nope"),
|
||||
|
@ -669,7 +670,7 @@ func TestStateStore_Txn_KVS_RO_Safety(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDelete,
|
||||
Verb: api.KVDelete,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar/baz",
|
||||
},
|
||||
|
@ -677,7 +678,7 @@ func TestStateStore_Txn_KVS_RO_Safety(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDeleteTree,
|
||||
Verb: api.KVDeleteTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "foo/bar",
|
||||
},
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/consul/agent"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
|
@ -33,7 +33,7 @@ func TestStatsFetcher(t *testing.T) {
|
|||
if _, err := s3.JoinLAN([]string{addr}); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
members := s1.serfLAN.Members()
|
||||
if len(members) != 3 {
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -39,7 +39,7 @@ func TestStatusLeader(t *testing.T) {
|
|||
t.Fatalf("unexpected leader: %v", leader)
|
||||
}
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Status.Leader", arg, &leader); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/types"
|
||||
"github.com/hashicorp/go-msgpack/codec"
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
|
@ -53,16 +54,6 @@ const (
|
|||
IgnoreUnknownTypeFlag MessageType = 128
|
||||
)
|
||||
|
||||
const (
|
||||
// HealthAny is special, and is used as a wild card,
|
||||
// not as a specific state.
|
||||
HealthAny = "any"
|
||||
HealthPassing = "passing"
|
||||
HealthWarning = "warning"
|
||||
HealthCritical = "critical"
|
||||
HealthMaint = "maintenance"
|
||||
)
|
||||
|
||||
const (
|
||||
// NodeMaint is the special key set by a node in maintenance mode.
|
||||
NodeMaint = "_node_maintenance"
|
||||
|
@ -91,9 +82,7 @@ var (
|
|||
)
|
||||
|
||||
func ValidStatus(s string) bool {
|
||||
return s == HealthPassing ||
|
||||
s == HealthWarning ||
|
||||
s == HealthCritical
|
||||
return s == api.HealthPassing || s == api.HealthWarning || s == api.HealthCritical
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -567,8 +556,8 @@ OUTER:
|
|||
for i := 0; i < n; i++ {
|
||||
node := nodes[i]
|
||||
for _, check := range node.Checks {
|
||||
if check.Status == HealthCritical ||
|
||||
(onlyPassing && check.Status != HealthPassing) {
|
||||
if check.Status == api.HealthCritical ||
|
||||
(onlyPassing && check.Status != api.HealthPassing) {
|
||||
nodes[i], nodes[n-1] = nodes[n-1], CheckServiceNode{}
|
||||
n--
|
||||
i--
|
||||
|
@ -661,40 +650,10 @@ func (d *DirEntry) Clone() *DirEntry {
|
|||
|
||||
type DirEntries []*DirEntry
|
||||
|
||||
type KVSOp string
|
||||
|
||||
const (
|
||||
KVSSet KVSOp = "set"
|
||||
KVSDelete = "delete"
|
||||
KVSDeleteCAS = "delete-cas" // Delete with check-and-set
|
||||
KVSDeleteTree = "delete-tree"
|
||||
KVSCAS = "cas" // Check-and-set
|
||||
KVSLock = "lock" // Lock a key
|
||||
KVSUnlock = "unlock" // Unlock a key
|
||||
|
||||
// The following operations are only available inside of atomic
|
||||
// transactions via the Txn request.
|
||||
KVSGet = "get" // Read the key during the transaction.
|
||||
KVSGetTree = "get-tree" // Read all keys with the given prefix during the transaction.
|
||||
KVSCheckSession = "check-session" // Check the session holds the key.
|
||||
KVSCheckIndex = "check-index" // Check the modify index of the key.
|
||||
)
|
||||
|
||||
// IsWrite returns true if the given operation alters the state store.
|
||||
func (op KVSOp) IsWrite() bool {
|
||||
switch op {
|
||||
case KVSGet, KVSGetTree, KVSCheckSession, KVSCheckIndex:
|
||||
return false
|
||||
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// KVSRequest is used to operate on the Key-Value store
|
||||
type KVSRequest struct {
|
||||
Datacenter string
|
||||
Op KVSOp // Which operation are we performing
|
||||
Op api.KVOp // Which operation are we performing
|
||||
DirEnt DirEntry // Which directory entry
|
||||
WriteRequest
|
||||
}
|
||||
|
|
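A short sketch of a caller after the structs.go hunks above: the KV request verb now comes from the api package, and ValidStatus accepts the api health strings. Only structs.KVSRequest, structs.DirEntry, structs.ValidStatus and the api constants are taken from the code above; the datacenter and key are invented for illustration.

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/consul/structs"
)

func main() {
	// Op is an api.KVOp now; structs.KVSSet no longer exists.
	req := structs.KVSRequest{
		Datacenter: "dc1",
		Op:         api.KVSet,
		DirEnt: structs.DirEntry{
			Key:   "config/example",
			Value: []byte("on"),
		},
	}

	// ValidStatus delegates to the api health constants as well.
	fmt.Println(req.Op, structs.ValidStatus(api.HealthWarning)) // prints: set true
}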
|
@ -6,6 +6,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
|
@ -299,7 +300,7 @@ func TestStructs_HealthCheck_IsSame(t *testing.T) {
|
|||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
|
@ -313,7 +314,7 @@ func TestStructs_HealthCheck_IsSame(t *testing.T) {
|
|||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
|
@ -376,7 +377,7 @@ func TestStructs_HealthCheck_Clone(t *testing.T) {
|
|||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
|
@ -435,7 +436,7 @@ func TestStructs_CheckServiceNodes_Filter(t *testing.T) {
|
|||
},
|
||||
Checks: HealthChecks{
|
||||
&HealthCheck{
|
||||
Status: HealthWarning,
|
||||
Status: api.HealthWarning,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -446,7 +447,7 @@ func TestStructs_CheckServiceNodes_Filter(t *testing.T) {
|
|||
},
|
||||
Checks: HealthChecks{
|
||||
&HealthCheck{
|
||||
Status: HealthPassing,
|
||||
Status: api.HealthPassing,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -457,7 +458,7 @@ func TestStructs_CheckServiceNodes_Filter(t *testing.T) {
|
|||
},
|
||||
Checks: HealthChecks{
|
||||
&HealthCheck{
|
||||
Status: HealthCritical,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -2,12 +2,14 @@ package structs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
// TxnKVOp is used to define a single operation on the KVS inside a
|
||||
// transaction
|
||||
type TxnKVOp struct {
|
||||
Verb KVSOp
|
||||
Verb api.KVOp
|
||||
DirEnt DirEntry
|
||||
}
|
||||
|
||||
|
|
|
@ -8,8 +8,9 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
||||
)
|
||||
|
||||
|
@ -20,7 +21,7 @@ func TestTxn_Apply(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Do a super basic request. The state store test covers the details so
|
||||
// we just need to be sure that the transaction is sent correctly and
|
||||
|
@ -30,7 +31,7 @@ func TestTxn_Apply(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSSet,
|
||||
Verb: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Flags: 42,
|
||||
|
@ -40,7 +41,7 @@ func TestTxn_Apply(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
},
|
||||
|
@ -110,7 +111,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Put in a key to read back.
|
||||
state := s1.fsm.State()
|
||||
|
@ -147,7 +148,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSSet,
|
||||
Verb: api.KVSet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -155,7 +156,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDelete,
|
||||
Verb: api.KVDelete,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -163,7 +164,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDeleteCAS,
|
||||
Verb: api.KVDeleteCAS,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -171,7 +172,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSDeleteTree,
|
||||
Verb: api.KVDeleteTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -179,7 +180,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCAS,
|
||||
Verb: api.KVCAS,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -187,7 +188,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSLock,
|
||||
Verb: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -195,7 +196,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSUnlock,
|
||||
Verb: api.KVUnlock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -203,7 +204,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -211,7 +212,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGetTree,
|
||||
Verb: api.KVGetTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -219,7 +220,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -227,7 +228,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -247,7 +248,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) {
|
|||
var expected structs.TxnResponse
|
||||
for i, op := range arg.Ops {
|
||||
switch op.KV.Verb {
|
||||
case structs.KVSGet, structs.KVSGetTree:
|
||||
case api.KVGet, api.KVGetTree:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
default:
|
||||
|
@ -269,7 +270,7 @@ func TestTxn_Apply_LockDelay(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create and invalidate a session with a lock.
|
||||
state := s1.fsm.State()
|
||||
|
@ -308,7 +309,7 @@ func TestTxn_Apply_LockDelay(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSLock,
|
||||
Verb: api.KVLock,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
Session: validId,
|
||||
|
@ -354,7 +355,7 @@ func TestTxn_Read(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Put in a key to read back.
|
||||
state := s1.fsm.State()
|
||||
|
@ -374,7 +375,7 @@ func TestTxn_Read(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "test",
|
||||
},
|
||||
|
@ -423,7 +424,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Put in a key to read back.
|
||||
state := s1.fsm.State()
|
||||
|
@ -460,7 +461,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
Ops: structs.TxnOps{
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGet,
|
||||
Verb: api.KVGet,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -468,7 +469,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSGetTree,
|
||||
Verb: api.KVGetTree,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -476,7 +477,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckSession,
|
||||
Verb: api.KVCheckSession,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -484,7 +485,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
},
|
||||
&structs.TxnOp{
|
||||
KV: &structs.TxnKVOp{
|
||||
Verb: structs.KVSCheckIndex,
|
||||
Verb: api.KVCheckIndex,
|
||||
DirEnt: structs.DirEntry{
|
||||
Key: "nope",
|
||||
},
|
||||
|
@ -508,7 +509,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) {
|
|||
}
|
||||
for i, op := range arg.Ops {
|
||||
switch op.KV.Verb {
|
||||
case structs.KVSGet, structs.KVSGetTree:
|
||||
case api.KVGet, api.KVGetTree:
|
||||
// These get filtered but won't result in an error.
|
||||
|
||||
default:
|
||||
|
|
2
main.go
2
main.go
|
@ -2,12 +2,12 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mitchellh/cli"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/mitchellh/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
|
testrpc/wait.go (new file, 63 lines)
|
@ -0,0 +1,63 @@
|
|||
package testrpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type testFn func() (bool, error)
|
||||
|
||||
const (
|
||||
baseWait = 1 * time.Millisecond
|
||||
maxWait = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
func WaitForResult(try testFn) error {
|
||||
var err error
|
||||
wait := baseWait
|
||||
for retries := 100; retries > 0; retries-- {
|
||||
var success bool
|
||||
success, err = try()
|
||||
if success {
|
||||
time.Sleep(25 * time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(wait)
|
||||
wait *= 2
|
||||
if wait > maxWait {
|
||||
wait = maxWait
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "timed out with error")
|
||||
} else {
|
||||
return fmt.Errorf("timed out")
|
||||
}
|
||||
}
|
||||
|
||||
type rpcFn func(string, interface{}, interface{}) error
|
||||
|
||||
func WaitForLeader(t *testing.T, rpc rpcFn, dc string) {
|
||||
var out structs.IndexedNodes
|
||||
if err := WaitForResult(func() (bool, error) {
|
||||
// Ensure we have a leader and a node registration.
|
||||
args := &structs.DCSpecificRequest{Datacenter: dc}
|
||||
if err := rpc("Catalog.ListNodes", args, &out); err != nil {
|
||||
return false, fmt.Errorf("Catalog.ListNodes failed: %v", err)
|
||||
}
|
||||
if !out.QueryMeta.KnownLeader {
|
||||
return false, fmt.Errorf("No leader")
|
||||
}
|
||||
if out.Index == 0 {
|
||||
return false, fmt.Errorf("Consul index is 0")
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to find leader: %v", err)
|
||||
}
|
||||
}
|
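The new package is a drop-in replacement for the old call sites, as the earlier test hunks show. A sketch of a fresh caller, assuming testServer is the existing helper in the consul package tests that returns a temp directory and a started *Server:

package consul

import (
	"os"
	"testing"

	"github.com/hashicorp/consul/testrpc"
)

// Sketch only, not part of this patch.
func TestSketch_UsesTestRPC(t *testing.T) {
	dir, s1 := testServer(t)
	defer os.RemoveAll(dir)
	defer s1.Shutdown()

	// Retries until dc1 reports a known leader and a registered node,
	// failing the test if that never happens.
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
}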
|
@ -11,10 +11,18 @@ import (
|
|||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// copied from testutil to break circular dependency
|
||||
const (
|
||||
HealthAny = "any"
|
||||
HealthPassing = "passing"
|
||||
HealthWarning = "warning"
|
||||
HealthCritical = "critical"
|
||||
HealthMaint = "maintenance"
|
||||
)
|
||||
|
||||
// JoinLAN is used to join local datacenters together.
|
||||
func (s *TestServer) JoinLAN(t *testing.T, addr string) {
|
||||
resp := s.get(t, "/v1/agent/join/"+addr)
|
||||
|
@ -124,11 +132,11 @@ func (s *TestServer) AddService(t *testing.T, name, status string, tags []string
|
|||
s.put(t, "/v1/agent/check/register", payload)
|
||||
|
||||
switch status {
|
||||
case structs.HealthPassing:
|
||||
case HealthPassing:
|
||||
s.put(t, "/v1/agent/check/pass/"+chkName, nil)
|
||||
case structs.HealthWarning:
|
||||
case HealthWarning:
|
||||
s.put(t, "/v1/agent/check/warn/"+chkName, nil)
|
||||
case structs.HealthCritical:
|
||||
case HealthCritical:
|
||||
s.put(t, "/v1/agent/check/fail/"+chkName, nil)
|
||||
default:
|
||||
t.Fatalf("Unrecognized status: %s", status)
|
||||
|
@ -155,11 +163,11 @@ func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
|
|||
s.put(t, "/v1/agent/check/register", payload)
|
||||
|
||||
switch status {
|
||||
case structs.HealthPassing:
|
||||
case HealthPassing:
|
||||
s.put(t, "/v1/agent/check/pass/"+name, nil)
|
||||
case structs.HealthWarning:
|
||||
case HealthWarning:
|
||||
s.put(t, "/v1/agent/check/warn/"+name, nil)
|
||||
case structs.HealthCritical:
|
||||
case HealthCritical:
|
||||
s.put(t, "/v1/agent/check/fail/"+name, nil)
|
||||
default:
|
||||
t.Fatalf("Unrecognized status: %s", status)
|
||||
|
|
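From a caller's point of view nothing changes here; the status strings simply come from testutil's own copies now, which is what breaks the circular dependency on consul/structs. A hedged sketch of an external test helper (registerFixtures and the service/check names are invented; AddService, AddCheck and the Health* constants are the ones shown above):

package fixtures

import (
	"testing"

	"github.com/hashicorp/consul/testutil"
)

// Sketch only: srv is an already-running *testutil.TestServer.
func registerFixtures(t *testing.T, srv *testutil.TestServer) {
	srv.AddService(t, "redis", testutil.HealthPassing, []string{"master"})
	srv.AddCheck(t, "redis-mem", "redis", testutil.HealthWarning)
}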
|
@ -2,10 +2,8 @@ package testutil
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -39,28 +37,3 @@ func WaitForResult(try testFn) error {
|
|||
return fmt.Errorf("timed out")
|
||||
}
|
||||
}
|
||||
|
||||
type rpcFn func(string, interface{}, interface{}) error
|
||||
|
||||
func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes {
|
||||
var out structs.IndexedNodes
|
||||
if err := WaitForResult(func() (bool, error) {
|
||||
// Ensure we have a leader and a node registration.
|
||||
args := &structs.DCSpecificRequest{
|
||||
Datacenter: dc,
|
||||
}
|
||||
if err := rpc("Catalog.ListNodes", args, &out); err != nil {
|
||||
return false, fmt.Errorf("Catalog.ListNodes failed: %v", err)
|
||||
}
|
||||
if !out.QueryMeta.KnownLeader {
|
||||
return false, fmt.Errorf("No leader")
|
||||
}
|
||||
if out.Index == 0 {
|
||||
return false, fmt.Errorf("Consul index is 0")
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("failed to find leader: %v", err)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"time"
|
||||
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/consul/structs"
|
||||
)
|
||||
|
||||
var consulAddr string
|
||||
|
@ -300,7 +299,7 @@ func TestChecksWatch_State(t *testing.T) {
|
|||
Node: "foobar",
|
||||
CheckID: "foobar",
|
||||
Name: "foobar",
|
||||
Status: structs.HealthWarning,
|
||||
Status: consulapi.HealthWarning,
|
||||
},
|
||||
}
|
||||
catalog.Register(reg, nil)
|
||||
|
@ -364,7 +363,7 @@ func TestChecksWatch_Service(t *testing.T) {
|
|||
Node: "foobar",
|
||||
CheckID: "foobar",
|
||||
Name: "foobar",
|
||||
Status: structs.HealthPassing,
|
||||
Status: consulapi.HealthPassing,
|
||||
ServiceID: "foobar",
|
||||
},
|
||||
}
|
||||
|
|