catalog: compare node names case insensitively in more places (#12444)

Many places in consul already treated node names case insensitively.
The state store indexes already do it, but there are a few places that
did a direct byte comparison which have now been corrected.

One place of particular consideration is ensureCheckIfNodeMatches
which is executed during snapshot restore (among other places). If a
node check used a slightly different casing than the casing of the node
during register then the snapshot restore here would deterministically
fail. This has been fixed.

Primary approach:

    git grep -i "node.*[!=]=.*node" -- ':!*_test.go' ':!docs'
    git grep -i '\[[^]]*member[^]]*\]'
    git grep -i '\[[^]]*\(member\|name\|node\)[^]]*\]' -- ':!*_test.go' ':!website' ':!ui' ':!agent/proxycfg/testing.go:' ':!*.md'
This commit is contained in:
R.B. Boyer 2022-02-24 16:54:47 -06:00 committed by GitHub
parent 333789355c
commit a97d20cf63
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 844 additions and 450 deletions

7
.changelog/12444.txt Normal file
View File

@ -0,0 +1,7 @@
```release-note:bug
catalog: compare node names case insensitively in more places
```
```release-note:bug
state: fix bug blocking snapshot restore when a node check and node differed in casing of the node string
```

View File

@ -2,19 +2,21 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/api"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/serf/coordinate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)
func TestCatalogRegister_Service_InvalidAddress(t *testing.T) {
@ -412,42 +414,28 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
Address: "127.0.0.1",
}
var out struct{}
if err := a.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, a.RPC("Catalog.Register", args, &out))
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
}
if err := a.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, a.RPC("Catalog.Register", args, &out))
// Nobody has coordinates set so this will still return them in the
// order they are indexed.
req, _ := http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogNodes(resp, req)
if err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, err)
assertIndex(t, resp)
nodes := obj.(structs.Nodes)
if len(nodes) != 3 {
t.Fatalf("bad: %v", obj)
}
if nodes[0].Node != "bar" {
t.Fatalf("bad: %v", nodes)
}
if nodes[1].Node != "foo" {
t.Fatalf("bad: %v", nodes)
}
if nodes[2].Node != a.Config.NodeName {
t.Fatalf("bad: %v", nodes)
}
require.Len(t, nodes, 3)
require.Equal(t, "bar", nodes[0].Node)
require.Equal(t, "foo", nodes[1].Node)
require.Equal(t, a.Config.NodeName, nodes[2].Node)
// Send an update for the node and wait for it to get applied.
arg := structs.CoordinateUpdateRequest{
@ -455,33 +443,21 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
Node: "foo",
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
}
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, a.RPC("Coordinate.Update", &arg, &out))
time.Sleep(300 * time.Millisecond)
// Query again and now foo should have moved to the front of the line.
req, _ = http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
resp = httptest.NewRecorder()
obj, err = a.srv.CatalogNodes(resp, req)
if err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, err)
assertIndex(t, resp)
nodes = obj.(structs.Nodes)
if len(nodes) != 3 {
t.Fatalf("bad: %v", obj)
}
if nodes[0].Node != "foo" {
t.Fatalf("bad: %v", nodes)
}
if nodes[1].Node != "bar" {
t.Fatalf("bad: %v", nodes)
}
if nodes[2].Node != a.Config.NodeName {
t.Fatalf("bad: %v", nodes)
}
require.Len(t, nodes, 3)
require.Equal(t, "foo", nodes[0].Node)
require.Equal(t, "bar", nodes[1].Node)
require.Equal(t, a.Config.NodeName, nodes[2].Node)
}
func TestCatalogServices(t *testing.T) {

View File

@ -2,6 +2,7 @@ package checks
import (
"fmt"
"strings"
"sync"
"time"
@ -246,7 +247,7 @@ func (c *CheckAlias) processChecks(checks []*structs.HealthCheck, CheckIfService
msg := "No checks found."
serviceFound := false
for _, chk := range checks {
if c.Node != "" && c.Node != chk.Node {
if c.Node != "" && !strings.EqualFold(c.Node, chk.Node) {
continue
}
serviceMatch := c.ServiceID.Matches(chk.CompoundServiceID())

View File

@ -454,6 +454,7 @@ func TestCheckAlias_remoteNodeOnlyPassing(t *testing.T) {
func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
t.Parallel()
run := func(t *testing.T, responseNodeName string) {
notify := newMockAliasNotify()
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
rpc := &mockRPC{}
@ -475,14 +476,14 @@ func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
// Should ignore any services
{
Node: "remote",
Node: responseNodeName,
ServiceID: "db",
Status: api.HealthCritical,
},
// Match
{
Node: "remote",
Node: responseNodeName,
Status: api.HealthCritical,
},
},
@ -495,6 +496,14 @@ func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
r.Fatalf("got state %q want %q", got, want)
}
})
}
t.Run("same case node name", func(t *testing.T) {
run(t, "remote")
})
t.Run("lowercase node name", func(t *testing.T) {
run(t, "ReMoTe")
})
}
type mockAliasNotify struct {

View File

@ -3,6 +3,7 @@ package consul
import (
"fmt"
"sort"
"strings"
"time"
"github.com/armon/go-metrics"
@ -285,7 +286,7 @@ func vetRegisterWithACL(
// note in state_store.go to ban this down there in Consul 0.8,
// but it's good to leave this here because it's required for
// correctness wrt. ACLs.
if check.Node != subj.Node {
if !strings.EqualFold(check.Node, subj.Node) {
return fmt.Errorf("Node '%s' for check '%s' doesn't match register request node '%s'",
check.Node, check.CheckID, subj.Node)
}

View File

@ -19,6 +19,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/stringslice"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
@ -3431,42 +3432,49 @@ service "gateway" {
}
func TestVetRegisterWithACL(t *testing.T) {
t.Parallel()
appendAuthz := func(t *testing.T, defaultAuthz acl.Authorizer, rules string) acl.Authorizer {
policy, err := acl.NewPolicyFromSource(rules, acl.SyntaxCurrent, nil, nil)
require.NoError(t, err)
authz, err := acl.NewPolicyAuthorizerWithDefaults(defaultAuthz, []*acl.Policy{policy}, nil)
require.NoError(t, err)
return authz
}
t.Run("With an 'allow all' authorizer the update should be allowed", func(t *testing.T) {
args := &structs.RegisterRequest{
Node: "nope",
Address: "127.0.0.1",
}
// With an "allow all" authorizer the update should be allowed.
if err := vetRegisterWithACL(acl.ManageAll(), args, nil); err != nil {
t.Fatalf("err: %v", err)
require.NoError(t, vetRegisterWithACL(acl.ManageAll(), args, nil))
})
var perms acl.Authorizer = acl.DenyAll()
args := &structs.RegisterRequest{
Node: "nope",
Address: "127.0.0.1",
}
// Create a basic node policy.
policy, err := acl.NewPolicyFromSource(`
node "node" {
perms = appendAuthz(t, perms, `
node "node" {
policy = "write"
}
`, acl.SyntaxLegacy, nil, nil)
if err != nil {
t.Fatalf("err %v", err)
}
perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
} `)
// With that policy, the update should now be blocked for node reasons.
err = vetRegisterWithACL(perms, args, nil)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
}
err := vetRegisterWithACL(perms, args, nil)
require.True(t, acl.IsErrPermissionDenied(err))
// Now use a permitted node name.
args.Node = "node"
if err := vetRegisterWithACL(perms, args, nil); err != nil {
t.Fatalf("err: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
}
require.NoError(t, vetRegisterWithACL(perms, args, nil))
// Build some node info that matches what we have now.
ns := &structs.NodeServices{
@ -3478,183 +3486,220 @@ node "node" {
}
// Try to register a service, which should be blocked.
args.Service = &structs.NodeService{
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "service",
ID: "my-id",
},
}
err = vetRegisterWithACL(perms, args, ns)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
}
require.True(t, acl.IsErrPermissionDenied(err))
// Chain on a basic service policy.
policy, err = acl.NewPolicyFromSource(`
service "service" {
perms = appendAuthz(t, perms, `
service "service" {
policy = "write"
}
`, acl.SyntaxLegacy, nil, nil)
if err != nil {
t.Fatalf("err %v", err)
}
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
} `)
// With the service ACL, the update should go through.
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Add an existing service that they are clobbering and aren't allowed
// to write to.
ns.Services["my-id"] = &structs.NodeService{
ns = &structs.NodeServices{
Node: &structs.Node{
Node: "node",
Address: "127.0.0.1",
},
Services: map[string]*structs.NodeService{
"my-id": {
Service: "other",
ID: "my-id",
},
},
}
err = vetRegisterWithACL(perms, args, ns)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
}
require.True(t, acl.IsErrPermissionDenied(err))
// Chain on a policy that allows them to write to the other service.
policy, err = acl.NewPolicyFromSource(`
service "other" {
perms = appendAuthz(t, perms, `
service "other" {
policy = "write"
}
`, acl.SyntaxLegacy, nil, nil)
if err != nil {
t.Fatalf("err %v", err)
}
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
} `)
// Now it should go through.
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Try creating the node and the service at once by having no existing
// node record. This should be ok since we have node and service
// permissions.
if err := vetRegisterWithACL(perms, args, nil); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, vetRegisterWithACL(perms, args, nil))
// Add a node-level check to the member, which should be rejected.
args.Check = &structs.HealthCheck{
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "service",
ID: "my-id",
},
Check: &structs.HealthCheck{
Node: "node",
},
}
err = vetRegisterWithACL(perms, args, ns)
if err == nil || !strings.Contains(err.Error(), "check member must be nil") {
t.Fatalf("bad: %v", err)
}
testutil.RequireErrorContains(t, err, "check member must be nil")
// Move the check into the slice, but give a bad node name.
args.Check.Node = "nope"
args.Checks = append(args.Checks, args.Check)
args.Check = nil
err = vetRegisterWithACL(perms, args, ns)
if err == nil || !strings.Contains(err.Error(), "doesn't match register request node") {
t.Fatalf("bad: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "service",
ID: "my-id",
},
Checks: []*structs.HealthCheck{
{
Node: "nope",
},
},
}
err = vetRegisterWithACL(perms, args, ns)
testutil.RequireErrorContains(t, err, "doesn't match register request node")
// Fix the node name, which should now go through.
args.Checks[0].Node = "node"
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "service",
ID: "my-id",
},
Checks: []*structs.HealthCheck{
{
Node: "node",
},
},
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Add a service-level check.
args.Checks = append(args.Checks, &structs.HealthCheck{
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "service",
ID: "my-id",
},
Checks: []*structs.HealthCheck{
{
Node: "node",
},
{
Node: "node",
ServiceID: "my-id",
})
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
},
},
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Try creating everything at once. This should be ok since we have all
// the permissions we need. It also makes sure that we can register a
// new node, service, and associated checks.
if err := vetRegisterWithACL(perms, args, nil); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, vetRegisterWithACL(perms, args, nil))
// Nil out the service registration, which'll skip the special case
// and force us to look at the ns data (it will look like we are
// writing to the "other" service which also has "my-id").
args.Service = nil
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Checks: []*structs.HealthCheck{
{
Node: "node",
},
{
Node: "node",
ServiceID: "my-id",
},
},
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Chain on a policy that forbids them to write to the other service.
policy, err = acl.NewPolicyFromSource(`
service "other" {
perms = appendAuthz(t, perms, `
service "other" {
policy = "deny"
}
`, acl.SyntaxLegacy, nil, nil)
if err != nil {
t.Fatalf("err %v", err)
}
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
} `)
// This should get rejected.
err = vetRegisterWithACL(perms, args, ns)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
}
require.True(t, acl.IsErrPermissionDenied(err))
// Change the existing service data to point to a service name they
// car write to. This should go through.
ns.Services["my-id"] = &structs.NodeService{
// can write to. This should go through.
ns = &structs.NodeServices{
Node: &structs.Node{
Node: "node",
Address: "127.0.0.1",
},
Services: map[string]*structs.NodeService{
"my-id": {
Service: "service",
ID: "my-id",
},
},
}
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Chain on a policy that forbids them to write to the node.
policy, err = acl.NewPolicyFromSource(`
node "node" {
perms = appendAuthz(t, perms, `
node "node" {
policy = "deny"
}
`, acl.SyntaxLegacy, nil, nil)
if err != nil {
t.Fatalf("err %v", err)
}
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
} `)
// This should get rejected because there's a node-level check in here.
err = vetRegisterWithACL(perms, args, ns)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
}
require.True(t, acl.IsErrPermissionDenied(err))
// Change the node-level check into a service check, and then it should
// go through.
args.Checks[0].ServiceID = "my-id"
if err := vetRegisterWithACL(perms, args, ns); err != nil {
t.Fatalf("err: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.1",
Checks: []*structs.HealthCheck{
{
Node: "node",
ServiceID: "my-id",
},
{
Node: "node",
ServiceID: "my-id",
},
},
}
require.NoError(t, vetRegisterWithACL(perms, args, ns))
// Finally, attempt to update the node part of the data and make sure
// that gets rejected since they no longer have permissions.
args.Address = "127.0.0.2"
err = vetRegisterWithACL(perms, args, ns)
if !acl.IsErrPermissionDenied(err) {
t.Fatalf("bad: %v", err)
args = &structs.RegisterRequest{
Node: "node",
Address: "127.0.0.2",
Checks: []*structs.HealthCheck{
{
Node: "node",
ServiceID: "my-id",
},
{
Node: "node",
ServiceID: "my-id",
},
},
}
err = vetRegisterWithACL(perms, args, ns)
require.True(t, acl.IsErrPermissionDenied(err))
}
func TestVetDeregisterWithACL(t *testing.T) {

View File

@ -6,6 +6,7 @@ import (
"net"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
@ -899,7 +900,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs
}
// Check if this node is "known" by serf
if _, ok := known[check.Node]; ok {
if _, ok := known[strings.ToLower(check.Node)]; ok {
continue
}
@ -1204,7 +1205,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE
// deregister us later.
//
// TODO(partitions): check partitions here too? server names should be unique in general though
if member.Name == s.config.NodeName {
if strings.EqualFold(member.Name, s.config.NodeName) {
s.logger.Warn("deregistering self should be done by follower",
"name", s.config.NodeName,
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),

View File

@ -189,12 +189,8 @@ func TestLeader_LeftMember(t *testing.T) {
// Should be registered
retry.Run(t, func(r *retry.R) {
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
}
if node == nil {
r.Fatal("client not registered")
}
require.NoError(r, err)
require.NotNil(r, node, "client not registered")
})
// Node should leave
@ -204,14 +200,11 @@ func TestLeader_LeftMember(t *testing.T) {
// Should be deregistered
retry.Run(t, func(r *retry.R) {
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
}
if node != nil {
r.Fatal("client still registered")
}
require.NoError(r, err)
require.Nil(r, node, "client still registered")
})
}
func TestLeader_ReapMember(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -239,12 +232,8 @@ func TestLeader_ReapMember(t *testing.T) {
// Should be registered
retry.Run(t, func(r *retry.R) {
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
r.Fatalf("err: %v", err)
}
if node == nil {
r.Fatal("client not registered")
}
require.NoError(r, err)
require.NotNil(r, node, "client not registered")
})
// Simulate a node reaping
@ -264,9 +253,7 @@ func TestLeader_ReapMember(t *testing.T) {
reaped := false
for start := time.Now(); time.Since(start) < 5*time.Second; {
_, node, err := state.GetNode(c1.config.NodeName, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
require.NoError(t, err)
if node == nil {
reaped = true
break
@ -277,6 +264,85 @@ func TestLeader_ReapMember(t *testing.T) {
}
}
func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
run := func(t *testing.T, status serf.MemberStatus, nameFn func(string) string) {
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = "root"
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
nodeName := s1.config.NodeName
if nameFn != nil {
nodeName = nameFn(nodeName)
}
state := s1.fsm.State()
// Should be registered
retry.Run(t, func(r *retry.R) {
_, node, err := state.GetNode(nodeName, nil)
require.NoError(r, err)
require.NotNil(r, node, "server not registered")
})
// Simulate THIS node reaping or leaving
mems := s1.LANMembersInAgentPartition()
var s1mem serf.Member
for _, m := range mems {
if strings.EqualFold(m.Name, nodeName) {
s1mem = m
s1mem.Status = status
s1mem.Name = nodeName
break
}
}
s1.reconcileCh <- s1mem
// Should NOT be deregistered; we have to poll quickly here because
// anti-entropy will put it back if it did get deleted.
reaped := false
for start := time.Now(); time.Since(start) < 5*time.Second; {
_, node, err := state.GetNode(nodeName, nil)
require.NoError(t, err)
if node == nil {
reaped = true
break
}
}
if reaped {
t.Fatalf("server should still be registered")
}
}
t.Run("original name", func(t *testing.T) {
t.Run("left", func(t *testing.T) {
run(t, serf.StatusLeft, nil)
})
t.Run("reap", func(t *testing.T) {
run(t, StatusReap, nil)
})
})
t.Run("uppercased name", func(t *testing.T) {
t.Run("left", func(t *testing.T) {
run(t, serf.StatusLeft, strings.ToUpper)
})
t.Run("reap", func(t *testing.T) {
run(t, StatusReap, strings.ToUpper)
})
})
}
func TestLeader_CheckServersMeta(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -2,6 +2,7 @@ package consul
import (
"fmt"
"strings"
"sync"
"github.com/hashicorp/go-version"
@ -38,7 +39,7 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error {
nodeID := types.NodeID(rawID)
// See if there's another node that conflicts with us.
if (nodeID == md.nodeID) && (m.Name != md.nodeName) {
if (nodeID == md.nodeID) && !strings.EqualFold(m.Name, md.nodeName) {
return fmt.Errorf("Member '%s' has conflicting node ID '%s' with this agent's ID",
m.Name, nodeID)
}

View File

@ -58,6 +58,30 @@ func TestMerge_LAN(t *testing.T) {
},
expect: "wrong datacenter",
},
"node ID conflict with delegate's ID but same node name with same casing": {
members: []*serf.Member{
makeTestNode(t, testMember{
dc: "dc1",
name: "node0",
id: thisNodeID,
server: true,
build: "0.7.5",
}),
},
expect: "",
},
"node ID conflict with delegate's ID but same node name with different casing": {
members: []*serf.Member{
makeTestNode(t, testMember{
dc: "dc1",
name: "NoDe0",
id: thisNodeID,
server: true,
build: "0.7.5",
}),
},
expect: "",
},
"node ID conflict with delegate's ID": {
members: []*serf.Member{
makeTestNode(t, testMember{

View File

@ -439,7 +439,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
// position 0, provided the results are from the same datacenter.
if qs.Node != "" && reply.Datacenter == qs.Datacenter {
for i, node := range reply.Nodes {
if node.Node.Node == qs.Node {
if strings.EqualFold(node.Node.Node, qs.Node) {
reply.Nodes[0], reply.Nodes[i] = reply.Nodes[i], reply.Nodes[0]
break
}

View File

@ -5,6 +5,7 @@ package consul
import (
"fmt"
"strings"
"time"
"github.com/armon/go-metrics"
@ -138,10 +139,11 @@ func (s *Server) reconcile() (err error) {
members := s.serfLAN.Members()
knownMembers := make(map[string]struct{})
for _, member := range members {
memberName := strings.ToLower(member.Name)
if err := s.reconcileMember(member); err != nil {
return err
}
knownMembers[member.Name] = struct{}{}
knownMembers[memberName] = struct{}{}
}
// Reconcile any members that have been reaped while we were not the

View File

@ -136,7 +136,7 @@ func (s *Store) ensureCheckIfNodeMatches(
nodePartition string,
check *structs.HealthCheck,
) error {
if check.Node != node || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
if !strings.EqualFold(check.Node, node) || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
return fmt.Errorf("check node %q does not match node %q",
printNodeName(check.Node, check.PartitionOrDefault()),
printNodeName(node, nodePartition),
@ -330,7 +330,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
}
if existing != nil {
n = existing
if n.Node != node.Node {
if !strings.EqualFold(n.Node, node.Node) {
// Lets first get all nodes and check whether name do match, we do not allow clash on nodes without ID
dupNameError := ensureNoNodeWithSimilarNameTxn(tx, node, false)
if dupNameError != nil {

View File

@ -105,7 +105,7 @@ type nodeServiceTuple struct {
func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple {
return nodeServiceTuple{
Node: sn.Node,
Node: strings.ToLower(sn.Node),
ServiceID: sn.ServiceID,
EntMeta: sn.EnterpriseMeta,
}
@ -113,7 +113,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl
func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServiceTuple {
return nodeServiceTuple{
Node: hc.Node,
Node: strings.ToLower(hc.Node),
ServiceID: hc.ServiceID,
EntMeta: hc.EnterpriseMeta,
}

View File

@ -3,22 +3,29 @@
package state
import "github.com/hashicorp/consul/agent/structs"
import (
"strings"
"github.com/hashicorp/consul/agent/structs"
)
func (nst nodeServiceTuple) nodeTuple() nodeTuple {
return nodeTuple{Node: nst.Node, Partition: ""}
return nodeTuple{
Node: strings.ToLower(nst.Node),
Partition: "",
}
}
func newNodeTupleFromNode(node *structs.Node) nodeTuple {
return nodeTuple{
Node: node.Node,
Node: strings.ToLower(node.Node),
Partition: "",
}
}
func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple {
return nodeTuple{
Node: hc.Node,
Node: strings.ToLower(hc.Node),
Partition: "",
}
}

View File

@ -471,8 +471,11 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
}
// Add in a top-level check.
//
// Verify that node name references in checks are case-insensitive during
// restore.
req.Check = &structs.HealthCheck{
Node: nodeName,
Node: strings.ToUpper(nodeName),
CheckID: "check1",
Name: "check",
RaftIndex: structs.RaftIndex{
@ -499,7 +502,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
t.Fatalf("bad: %#v", out)
}
c := out[0]
if c.Node != nodeName || c.CheckID != "check1" || c.Name != "check" ||
if c.Node != strings.ToUpper(nodeName) || c.CheckID != "check1" || c.Name != "check" ||
c.CreateIndex != 3 || c.ModifyIndex != 3 {
t.Fatalf("bad check returned: %#v", c)
}
@ -545,7 +548,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
t.Fatalf("bad: %#v", out)
}
c1 := out[0]
if c1.Node != nodeName || c1.CheckID != "check1" || c1.Name != "check" ||
if c1.Node != strings.ToUpper(nodeName) || c1.CheckID != "check1" || c1.Name != "check" ||
c1.CreateIndex != 3 || c1.ModifyIndex != 3 {
t.Fatalf("bad check returned, should not be modified: %#v", c1)
}

View File

@ -3,6 +3,7 @@ package consul
import (
"runtime"
"strconv"
"strings"
"github.com/hashicorp/go-version"
"github.com/hashicorp/serf/serf"
@ -161,7 +162,7 @@ func (c *Client) CheckServers(datacenter string, fn func(*metadata.Server) bool)
func isSerfMember(s *serf.Serf, nodeName string) bool {
for _, m := range s.Members() {
if m.Name == nodeName {
if strings.EqualFold(m.Name, nodeName) {
return true
}
}

View File

@ -452,7 +452,7 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool {
// Check if any of the node-level fields are being changed.
if r.ID != node.ID ||
r.Node != node.Node ||
!strings.EqualFold(r.Node, node.Node) ||
r.PartitionOrDefault() != node.PartitionOrDefault() ||
r.Address != node.Address ||
r.Datacenter != node.Datacenter ||
@ -796,7 +796,7 @@ type Nodes []*Node
// RaftIndex fields.
func (n *Node) IsSame(other *Node) bool {
return n.ID == other.ID &&
n.Node == other.Node &&
strings.EqualFold(n.Node, other.Node) &&
n.PartitionOrDefault() == other.PartitionOrDefault() &&
n.Address == other.Address &&
n.Datacenter == other.Datacenter &&
@ -1431,7 +1431,7 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
// TaggedAddresses map[string]string
// NodeMeta map[string]string
if s.ID != other.ID ||
s.Node != other.Node ||
!strings.EqualFold(s.Node, other.Node) ||
s.ServiceKind != other.ServiceKind ||
s.ServiceID != other.ServiceID ||
s.ServiceName != other.ServiceName ||
@ -1675,7 +1675,7 @@ func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (c *HealthCheck) IsSame(other *HealthCheck) bool {
if c.Node != other.Node ||
if !strings.EqualFold(c.Node, other.Node) ||
c.CheckID != other.CheckID ||
c.Name != other.Name ||
c.Status != other.Status ||

View File

@ -73,16 +73,6 @@ func TestStructs_Implements(t *testing.T) {
}
func TestStructs_RegisterRequest_ChangesNode(t *testing.T) {
req := &RegisterRequest{
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
Node: "test",
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: make(map[string]string),
NodeMeta: map[string]string{
"role": "server",
},
}
node := &Node{
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
@ -95,41 +85,104 @@ func TestStructs_RegisterRequest_ChangesNode(t *testing.T) {
},
}
check := func(twiddle, restore func()) {
type testcase struct {
name string
setup func(*RegisterRequest)
expect bool
}
cases := []testcase{
{
name: "id",
setup: func(r *RegisterRequest) {
r.ID = "nope"
},
expect: true,
},
{
name: "name",
setup: func(r *RegisterRequest) {
r.Node = "nope"
},
expect: true,
},
{
name: "name casing",
setup: func(r *RegisterRequest) {
r.Node = "TeSt"
},
expect: false,
},
{
name: "address",
setup: func(r *RegisterRequest) {
r.Address = "127.0.0.2"
},
expect: true,
},
{
name: "dc",
setup: func(r *RegisterRequest) {
r.Datacenter = "dc2"
},
expect: true,
},
{
name: "tagged addresses",
setup: func(r *RegisterRequest) {
r.TaggedAddresses["wan"] = "nope"
},
expect: true,
},
{
name: "node meta",
setup: func(r *RegisterRequest) {
r.NodeMeta["invalid"] = "nope"
},
expect: true,
},
}
run := func(t *testing.T, tc testcase) {
req := &RegisterRequest{
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
Node: "test",
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: make(map[string]string),
NodeMeta: map[string]string{
"role": "server",
},
}
if req.ChangesNode(node) {
t.Fatalf("should not change")
}
twiddle()
tc.setup(req)
if tc.expect {
if !req.ChangesNode(node) {
t.Fatalf("should change")
}
} else {
if req.ChangesNode(node) {
t.Fatalf("should not change")
}
}
t.Run("skip node update", func(t *testing.T) {
req.SkipNodeUpdate = true
if req.ChangesNode(node) {
t.Fatalf("should skip")
}
req.SkipNodeUpdate = false
if !req.ChangesNode(node) {
t.Fatalf("should change")
})
}
restore()
if req.ChangesNode(node) {
t.Fatalf("should not change")
}
}
check(func() { req.ID = "nope" }, func() { req.ID = types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5") })
check(func() { req.Node = "nope" }, func() { req.Node = "test" })
check(func() { req.Address = "127.0.0.2" }, func() { req.Address = "127.0.0.1" })
check(func() { req.Datacenter = "dc2" }, func() { req.Datacenter = "dc1" })
check(func() { req.TaggedAddresses["wan"] = "nope" }, func() { delete(req.TaggedAddresses, "wan") })
check(func() { req.NodeMeta["invalid"] = "nope" }, func() { delete(req.NodeMeta, "invalid") })
if !req.ChangesNode(nil) {
t.Fatalf("should change")
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
@ -234,6 +287,65 @@ func TestNode_IsSame(t *testing.T) {
ModifyIndex: 2,
},
}
type testcase struct {
name string
setup func(*Node)
expect bool
}
cases := []testcase{
{
name: "id",
setup: func(n *Node) {
n.ID = types.NodeID("")
},
expect: false,
},
{
name: "node",
setup: func(n *Node) {
n.Node = "other"
},
expect: false,
},
{
name: "node casing",
setup: func(n *Node) {
n.Node = "MyNoDe1"
},
expect: true,
},
{
name: "dc",
setup: func(n *Node) {
n.Datacenter = "dcX"
},
expect: false,
},
{
name: "address",
setup: func(n *Node) {
n.Address = "127.0.0.1"
},
expect: false,
},
{
name: "tagged addresses",
setup: func(n *Node) {
n.TaggedAddresses = map[string]string{"my": "address"}
},
expect: false,
},
{
name: "meta",
setup: func(n *Node) {
n.Meta = map[string]string{"my": "meta"}
},
expect: false,
},
}
run := func(t *testing.T, tc testcase) {
other := &Node{
ID: id,
Node: node,
@ -246,86 +358,157 @@ func TestNode_IsSame(t *testing.T) {
ModifyIndex: 3,
},
}
check := func(twiddle, restore func()) {
t.Helper()
if !n.IsSame(other) || !other.IsSame(n) {
t.Fatalf("should be the same")
}
twiddle()
tc.setup(other)
if tc.expect {
if !n.IsSame(other) || !other.IsSame(n) {
t.Fatalf("should be the same")
}
} else {
if n.IsSame(other) || other.IsSame(n) {
t.Fatalf("should be different, was %#v VS %#v", n, other)
}
restore()
if !n.IsSame(other) || !other.IsSame(n) {
t.Fatalf("should be the same")
}
}
check(func() { other.ID = types.NodeID("") }, func() { other.ID = id })
check(func() { other.Node = "other" }, func() { other.Node = node })
check(func() { other.Datacenter = "dcX" }, func() { other.Datacenter = datacenter })
check(func() { other.Address = "127.0.0.1" }, func() { other.Address = address })
check(func() { other.TaggedAddresses = map[string]string{"my": "address"} }, func() { other.TaggedAddresses = map[string]string{} })
check(func() { other.Meta = map[string]string{"my": "meta"} }, func() { other.Meta = map[string]string{} })
if !n.IsSame(other) {
t.Fatalf("should be equal, was %#v VS %#v", n, other)
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
func TestStructs_ServiceNode_IsSameService(t *testing.T) {
sn := testServiceNode(t)
node := "node1"
serviceID := sn.ServiceID
serviceAddress := sn.ServiceAddress
serviceEnableTagOverride := sn.ServiceEnableTagOverride
serviceMeta := make(map[string]string)
for k, v := range sn.ServiceMeta {
serviceMeta[k] = v
const (
nodeName = "node1"
)
type testcase struct {
name string
setup func(*ServiceNode)
expect bool
}
cases := []testcase{
{
name: "ServiceID",
setup: func(sn *ServiceNode) {
sn.ServiceID = "66fb695a-c782-472f-8d36-4f3edd754b37"
},
},
{
name: "Node",
setup: func(sn *ServiceNode) {
sn.Node = "other"
},
},
{
name: "Node casing",
setup: func(sn *ServiceNode) {
sn.Node = "NoDe1"
},
expect: true,
},
{
name: "ServiceAddress",
setup: func(sn *ServiceNode) {
sn.ServiceAddress = "1.2.3.4"
},
},
{
name: "ServiceEnableTagOverride",
setup: func(sn *ServiceNode) {
sn.ServiceEnableTagOverride = !sn.ServiceEnableTagOverride
},
},
{
name: "ServiceKind",
setup: func(sn *ServiceNode) {
sn.ServiceKind = "newKind"
},
},
{
name: "ServiceMeta",
setup: func(sn *ServiceNode) {
sn.ServiceMeta = map[string]string{"my": "meta"}
},
},
{
name: "ServiceName",
setup: func(sn *ServiceNode) {
sn.ServiceName = "duck"
},
},
{
name: "ServicePort",
setup: func(sn *ServiceNode) {
sn.ServicePort = 65534
},
},
{
name: "ServiceTags",
setup: func(sn *ServiceNode) {
sn.ServiceTags = []string{"new", "tags"}
},
},
{
name: "ServiceWeights",
setup: func(sn *ServiceNode) {
sn.ServiceWeights = Weights{Passing: 42, Warning: 41}
},
},
{
name: "ServiceProxy",
setup: func(sn *ServiceNode) {
sn.ServiceProxy = ConnectProxyConfig{}
},
},
{
name: "ServiceConnect",
setup: func(sn *ServiceNode) {
sn.ServiceConnect = ServiceConnect{}
},
},
{
name: "ServiceTaggedAddresses",
setup: func(sn *ServiceNode) {
sn.ServiceTaggedAddresses = nil
},
},
}
serviceName := sn.ServiceName
servicePort := sn.ServicePort
serviceTags := sn.ServiceTags
serviceWeights := Weights{Passing: 2, Warning: 1}
sn.ServiceWeights = serviceWeights
serviceProxy := sn.ServiceProxy
serviceConnect := sn.ServiceConnect
serviceTaggedAddresses := sn.ServiceTaggedAddresses
n := sn.ToNodeService().ToServiceNode(node)
other := sn.ToNodeService().ToServiceNode(node)
run := func(t *testing.T, tc testcase) {
sn := testServiceNode(t)
sn.ServiceWeights = Weights{Passing: 2, Warning: 1}
n := sn.ToNodeService().ToServiceNode(nodeName)
other := sn.ToNodeService().ToServiceNode(nodeName)
check := func(twiddle, restore func()) {
t.Helper()
if !n.IsSameService(other) || !other.IsSameService(n) {
t.Fatalf("should be the same")
}
twiddle()
tc.setup(other)
if tc.expect {
if !n.IsSameService(other) || !other.IsSameService(n) {
t.Fatalf("should be the same")
}
} else {
if n.IsSameService(other) || other.IsSameService(n) {
t.Fatalf("should be different, was %#v VS %#v", n, other)
}
restore()
if !n.IsSameService(other) || !other.IsSameService(n) {
t.Fatalf("should be the same after restore, was:\n %#v VS\n %#v", n, other)
}
}
check(func() { other.ServiceID = "66fb695a-c782-472f-8d36-4f3edd754b37" }, func() { other.ServiceID = serviceID })
check(func() { other.Node = "other" }, func() { other.Node = node })
check(func() { other.ServiceAddress = "1.2.3.4" }, func() { other.ServiceAddress = serviceAddress })
check(func() { other.ServiceEnableTagOverride = !serviceEnableTagOverride }, func() { other.ServiceEnableTagOverride = serviceEnableTagOverride })
check(func() { other.ServiceKind = "newKind" }, func() { other.ServiceKind = "" })
check(func() { other.ServiceMeta = map[string]string{"my": "meta"} }, func() { other.ServiceMeta = serviceMeta })
check(func() { other.ServiceName = "duck" }, func() { other.ServiceName = serviceName })
check(func() { other.ServicePort = 65534 }, func() { other.ServicePort = servicePort })
check(func() { other.ServiceTags = []string{"new", "tags"} }, func() { other.ServiceTags = serviceTags })
check(func() { other.ServiceWeights = Weights{Passing: 42, Warning: 41} }, func() { other.ServiceWeights = serviceWeights })
check(func() { other.ServiceProxy = ConnectProxyConfig{} }, func() { other.ServiceProxy = serviceProxy })
check(func() { other.ServiceConnect = ServiceConnect{} }, func() { other.ServiceConnect = serviceConnect })
check(func() { other.ServiceTaggedAddresses = nil }, func() { other.ServiceTaggedAddresses = serviceTaggedAddresses })
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
func TestStructs_ServiceNode_PartialClone(t *testing.T) {
@ -1175,6 +1358,71 @@ func TestStructs_NodeService_IsSame(t *testing.T) {
}
// TestStructs_HealthCheck_IsSame exercises HealthCheck.IsSame via a table of
// single-field mutations. Every mutation should make the pair compare as
// different, except "Node casing" (expect: true): node names differing only
// in case must still compare as the same node.
//
// NOTE(review): this span is rendered diff output. The "@ -N,N +N,N @" lines
// below are hunk headers that elide interior content, and lines from the
// pre-change implementation (checkCheckIDField / checkStringField and their
// call sites) are interleaved with the replacement table-driven code. Consult
// the original commit before treating this text as compilable Go.
func TestStructs_HealthCheck_IsSame(t *testing.T) {
type testcase struct {
name string
setup func(*HealthCheck)
expect bool
}
cases := []testcase{
{
name: "Node",
setup: func(hc *HealthCheck) {
hc.Node = "XXX"
},
},
{
// Only the casing of the node name changes here, so the checks
// are still expected to compare as the same.
name: "Node casing",
setup: func(hc *HealthCheck) {
hc.Node = "NoDe1"
},
expect: true,
},
{
name: "CheckID",
setup: func(hc *HealthCheck) {
hc.CheckID = "XXX"
},
},
{
name: "Name",
setup: func(hc *HealthCheck) {
hc.Name = "XXX"
},
},
{
name: "Status",
setup: func(hc *HealthCheck) {
hc.Status = "XXX"
},
},
{
name: "Notes",
setup: func(hc *HealthCheck) {
hc.Notes = "XXX"
},
},
{
name: "Output",
setup: func(hc *HealthCheck) {
hc.Output = "XXX"
},
},
{
name: "ServiceID",
setup: func(hc *HealthCheck) {
hc.ServiceID = "XXX"
},
},
{
name: "ServiceName",
setup: func(hc *HealthCheck) {
hc.ServiceName = "XXX"
},
},
}
run := func(t *testing.T, tc testcase) {
hc := &HealthCheck{
Node: "node1",
CheckID: "check1",
// NOTE(review): hunk header — the middle fields of this HealthCheck
// literal (presumably Name/Status/Notes/Output/ServiceID) are elided
// by the diff; verify against the full file.
@ -1186,6 +1434,7 @@ func TestStructs_HealthCheck_IsSame(t *testing.T) {
ServiceName: "theservice",
ServiceTags: []string{"foo"},
}
if !hc.IsSame(hc) {
t.Fatalf("should be equal to itself")
}
// NOTE(review): hunk header — the construction of "other" is elided here;
// only its trailing RaftIndex fields are visible below.
@ -1205,52 +1454,29 @@ func TestStructs_HealthCheck_IsSame(t *testing.T) {
ModifyIndex: 2,
},
}
// Raft bookkeeping fields must not affect IsSame.
if !hc.IsSame(other) || !other.IsSame(hc) {
t.Fatalf("should not care about Raft fields")
}
// NOTE(review): from here down, pre-change helper lines
// (checkCheckIDField / checkStringField) are interleaved with the
// replacement tc.setup / expect logic and the t.Run driver loop.
checkCheckIDField := func(field *types.CheckID) {
tc.setup(hc)
if tc.expect {
if !hc.IsSame(other) || !other.IsSame(hc) {
t.Fatalf("should be the same")
}
old := *field
*field = "XXX"
} else {
if hc.IsSame(other) || other.IsSame(hc) {
t.Fatalf("should not be the same")
}
*field = old
if !hc.IsSame(other) || !other.IsSame(hc) {
t.Fatalf("should be the same")
}
}
checkStringField := func(field *string) {
if !hc.IsSame(other) || !other.IsSame(hc) {
t.Fatalf("should be the same")
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
old := *field
*field = "XXX"
if hc.IsSame(other) || other.IsSame(hc) {
t.Fatalf("should not be the same")
}
*field = old
if !hc.IsSame(other) || !other.IsSame(hc) {
t.Fatalf("should be the same")
}
}
checkStringField(&other.Node)
checkCheckIDField(&other.CheckID)
checkStringField(&other.Name)
checkStringField(&other.Status)
checkStringField(&other.Notes)
checkStringField(&other.Output)
checkStringField(&other.ServiceID)
checkStringField(&other.ServiceName)
}
func TestStructs_HealthCheck_Marshalling(t *testing.T) {

View File

@ -103,11 +103,11 @@ func (c *cmd) Run(args []string) int {
var area1, area2 string
for _, dc := range dcs {
for _, entry := range dc.Coordinates {
if dc.Datacenter == dc1 && entry.Node == node1 {
if dc.Datacenter == dc1 && strings.EqualFold(entry.Node, node1) {
area1 = dc.AreaID
coord1 = entry.Coord
}
if dc.Datacenter == dc2 && entry.Node == node2 {
if dc.Datacenter == dc2 && strings.EqualFold(entry.Node, node2) {
area2 = dc.AreaID
coord2 = entry.Coord
}
@ -145,10 +145,10 @@ func (c *cmd) Run(args []string) int {
// Index all the coordinates by segment.
cs1, cs2 := make(lib.CoordinateSet), make(lib.CoordinateSet)
for _, entry := range entries {
if entry.Node == nodes[0] {
if strings.EqualFold(entry.Node, nodes[0]) {
cs1[entry.Segment] = entry.Coord
}
if entry.Node == nodes[1] {
if strings.EqualFold(entry.Node, nodes[1]) {
cs2[entry.Segment] = entry.Coord
}
}

View File

@ -7,11 +7,12 @@ import (
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/serf/coordinate"
"github.com/mitchellh/cli"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/serf/coordinate"
"github.com/mitchellh/cli"
)
func TestRTTCommand_noTabs(t *testing.T) {
@ -167,11 +168,15 @@ func TestRTTCommand_WAN(t *testing.T) {
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
node := fmt.Sprintf("%s.%s", a.Config.NodeName, a.Config.Datacenter)
var (
node = fmt.Sprintf("%s.%s", a.Config.NodeName, a.Config.Datacenter)
nodeUpper = fmt.Sprintf("%s.%s", strings.ToUpper(a.Config.NodeName), a.Config.Datacenter)
nodeLower = fmt.Sprintf("%s.%s", strings.ToLower(a.Config.NodeName), a.Config.Datacenter)
)
// We can't easily inject WAN coordinates, so we will just query the
// node with itself.
{
t.Run("query self", func(t *testing.T) {
ui := cli.NewMockUi()
c := New(ui)
args := []string{
@ -189,10 +194,30 @@ func TestRTTCommand_WAN(t *testing.T) {
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
t.Fatalf("bad: %#v", ui.OutputWriter.String())
}
})
t.Run("query self with case differences", func(t *testing.T) {
ui := cli.NewMockUi()
c := New(ui)
args := []string{
"-wan",
"-http-addr=" + a.HTTPAddr(),
nodeLower,
nodeUpper,
}
code := c.Run(args)
if code != 0 {
t.Log(ui.OutputWriter.String())
t.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())
}
// Default to the agent's node.
{
// Make sure there was some kind of RTT reported in the output.
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
t.Fatalf("bad: %#v", ui.OutputWriter.String())
}
})
t.Run("default to the agent's node", func(t *testing.T) {
ui := cli.NewMockUi()
c := New(ui)
args := []string{
@ -209,10 +234,9 @@ func TestRTTCommand_WAN(t *testing.T) {
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
t.Fatalf("bad: %#v", ui.OutputWriter.String())
}
}
})
// Try an unknown node.
{
t.Run("try an unknown node", func(t *testing.T) {
ui := cli.NewMockUi()
c := New(ui)
args := []string{
@ -225,5 +249,5 @@ func TestRTTCommand_WAN(t *testing.T) {
if code != 1 {
t.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())
}
}
})
}