2015-04-09 20:23:14 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
2015-05-14 01:22:34 +00:00
|
|
|
"fmt"
|
2015-04-09 20:23:14 +00:00
|
|
|
"math/rand"
|
|
|
|
"os"
|
2015-04-29 02:07:10 +00:00
|
|
|
"reflect"
|
2015-06-23 02:14:02 +00:00
|
|
|
"strings"
|
2015-04-09 20:23:14 +00:00
|
|
|
"testing"
|
2015-04-13 20:45:42 +00:00
|
|
|
"time"
|
2015-04-09 20:23:14 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/consul/consul/structs"
|
|
|
|
"github.com/hashicorp/consul/testutil"
|
2015-10-15 23:07:16 +00:00
|
|
|
"github.com/hashicorp/net-rpc-msgpackrpc"
|
2015-04-09 20:23:14 +00:00
|
|
|
"github.com/hashicorp/serf/coordinate"
|
|
|
|
)
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
// generateRandomCoordinate creates a random coordinate. This mucks with the
|
|
|
|
// underlying structure directly, so it's not really useful for any particular
|
|
|
|
// position in the network, but it's a good payload to send through to make
|
|
|
|
// sure things come out the other side or get stored correctly.
|
|
|
|
func generateRandomCoordinate() *coordinate.Coordinate {
|
2015-04-09 20:23:14 +00:00
|
|
|
config := coordinate.DefaultConfig()
|
2015-06-06 03:31:33 +00:00
|
|
|
coord := coordinate.NewCoordinate(config)
|
|
|
|
for i := range coord.Vec {
|
|
|
|
coord.Vec[i] = rand.NormFloat64()
|
2015-04-13 20:45:42 +00:00
|
|
|
}
|
2015-06-06 03:31:33 +00:00
|
|
|
coord.Error = rand.NormFloat64()
|
|
|
|
coord.Adjustment = rand.NormFloat64()
|
|
|
|
return coord
|
2015-04-09 20:23:14 +00:00
|
|
|
}
|
|
|
|
|
2015-06-06 03:31:33 +00:00
|
|
|
// verifyCoordinatesEqual will compare a and b and fail if they are not exactly
|
2015-06-23 02:14:02 +00:00
|
|
|
// equal (no floating point fuzz is considered since we are trying to make sure
|
|
|
|
// we are getting exactly the coordinates we expect, without math on them).
|
2015-06-06 03:31:33 +00:00
|
|
|
func verifyCoordinatesEqual(t *testing.T, a, b *coordinate.Coordinate) {
|
|
|
|
if !reflect.DeepEqual(a, b) {
|
|
|
|
t.Fatalf("coordinates are not equal: %v != %v", a, b)
|
|
|
|
}
|
2015-04-09 20:23:14 +00:00
|
|
|
}
|
|
|
|
|
2015-05-14 02:09:58 +00:00
|
|
|
// TestCoordinate_Update exercises the Coordinate.Update RPC endpoint:
// updates are batched and only applied once the update period elapses,
// a later update for the same node within a batch wins, updates beyond
// the batch allowance are dropped, and coordinates with the wrong
// dimensionality are rejected without panicking.
func TestCoordinate_Update(t *testing.T) {
	name := fmt.Sprintf("Node %d", getPort())
	dir1, config1 := testServerConfig(t, name)
	defer os.RemoveAll(dir1)

	// Short period and a small batch allowance (5 * 2 = 10 updates per
	// period) so the test can observe batching and overflow quickly.
	config1.CoordinateUpdatePeriod = 500 * time.Millisecond
	config1.CoordinateUpdateBatchSize = 5
	config1.CoordinateUpdateMaxBatches = 2
	s1, err := NewServer(config1)
	if err != nil {
		t.Fatal(err)
	}
	defer s1.Shutdown()

	codec := rpcClient(t, s1)
	defer codec.Close()
	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Register some nodes.
	nodes := []string{"node1", "node2"}
	for _, node := range nodes {
		req := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       node,
			Address:    "127.0.0.1",
		}
		var reply struct{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Send an update for the first node.
	arg1 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "node1",
		Coord:      generateRandomCoordinate(),
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg1, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Send an update for the second node.
	arg2 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "node2",
		Coord:      generateRandomCoordinate(),
	}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure the updates did not yet apply because the update period
	// hasn't expired. Reading the raw FSM state bypasses the RPC layer.
	state := s1.fsm.State()
	c, err := state.CoordinateGetRaw("node1")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if c != nil {
		t.Fatalf("should be nil because the update should be batched")
	}
	c, err = state.CoordinateGetRaw("node2")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if c != nil {
		t.Fatalf("should be nil because the update should be batched")
	}

	// Send another update for the second node. It should take precedence
	// since there will be two updates in the same batch.
	arg2.Coord = generateRandomCoordinate()
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait a while and the updates should get picked up.
	time.Sleep(3 * s1.config.CoordinateUpdatePeriod)
	c, err = state.CoordinateGetRaw("node1")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if c == nil {
		t.Fatalf("should return a coordinate but it's nil")
	}
	verifyCoordinatesEqual(t, c, arg1.Coord)
	c, err = state.CoordinateGetRaw("node2")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if c == nil {
		t.Fatalf("should return a coordinate but it's nil")
	}
	// node2 must hold the second (overriding) coordinate from arg2.
	verifyCoordinatesEqual(t, c, arg2.Coord)

	// Register a bunch of additional nodes. spamLen is one more than the
	// server will batch in a single period, so exactly one update should
	// get dropped in the spam loop below.
	spamLen := s1.config.CoordinateUpdateBatchSize*s1.config.CoordinateUpdateMaxBatches + 1
	for i := 0; i < spamLen; i++ {
		req := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       fmt.Sprintf("bogusnode%d", i),
			Address:    "127.0.0.1",
		}
		var reply struct{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Now spam some coordinate updates and make sure it starts throwing
	// them away if they exceed the batch allowance. Note we have to make
	// unique names since these are held in map by node name.
	for i := 0; i < spamLen; i++ {
		arg1.Node = fmt.Sprintf("bogusnode%d", i)
		arg1.Coord = generateRandomCoordinate()
		if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg1, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Wait a little while for the batch routine to run, then make sure
	// exactly one of the updates got dropped (we won't know which one).
	time.Sleep(3 * s1.config.CoordinateUpdatePeriod)
	numDropped := 0
	for i := 0; i < spamLen; i++ {
		c, err = state.CoordinateGetRaw(fmt.Sprintf("bogusnode%d", i))
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if c == nil {
			numDropped++
		}
	}
	if numDropped != 1 {
		t.Fatalf("wrong number of coordinates dropped, %d != 1", numDropped)
	}

	// Finally, send a coordinate with the wrong dimensionality to make sure
	// there are no panics, and that it gets rejected.
	arg2.Coord.Vec = make([]float64, 2*len(arg2.Coord.Vec))
	err = msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out)
	if err == nil || !strings.Contains(err.Error(), "rejected bad coordinate") {
		t.Fatalf("should have failed with an error, got %v", err)
	}
}
|
|
|
|
|
2016-12-12 19:58:31 +00:00
|
|
|
// TestCoordinate_Update_ACLDeny verifies ACL enforcement on the
// Coordinate.Update endpoint: with version 8 ACL enforcement off, updates
// are allowed; once enforcement is turned on, updates require a token with
// node "write" policy for the specific node being updated.
func TestCoordinate_Update_ACLDeny(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		// Start with legacy behavior (no node-level enforcement); the
		// test flips this on partway through.
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Register some nodes.
	nodes := []string{"node1", "node2"}
	for _, node := range nodes {
		req := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       node,
			Address:    "127.0.0.1",
		}
		var reply struct{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Send an update for the first node. This should go through since we
	// don't have version 8 ACLs enforced yet.
	req := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "node1",
		Coord:      generateRandomCoordinate(),
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now turn on version 8 enforcement and try again. The same request
	// (no token) should now be rejected.
	s1.config.ACLEnforceVersion8 = true
	err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out)
	if err == nil || !strings.Contains(err.Error(), permissionDenied) {
		t.Fatalf("err: %v", err)
	}

	// Create an ACL that can write to the node.
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name: "User token",
			Type: structs.ACLTypeClient,
			Rules: `
node "node1" {
	policy = "write"
}
`,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// With the token, it should now go through.
	req.Token = id
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// But it should be blocked for the other node, since the token only
	// grants write on "node1".
	req.Node = "node2"
	err = msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out)
	if err == nil || !strings.Contains(err.Error(), permissionDenied) {
		t.Fatalf("err: %v", err)
	}
}
|
|
|
|
|
2015-07-29 23:33:25 +00:00
|
|
|
func TestCoordinate_ListDatacenters(t *testing.T) {
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
2015-10-15 23:07:16 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
defer codec.Close()
|
2015-07-29 23:33:25 +00:00
|
|
|
|
2015-10-15 23:07:16 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
2015-07-29 23:33:25 +00:00
|
|
|
|
|
|
|
// It's super hard to force the Serfs into a known configuration of
|
|
|
|
// coordinates, so the best we can do is make sure our own DC shows
|
|
|
|
// up in the list with the proper coordinates. The guts of the algorithm
|
|
|
|
// are extensively tested in rtt_test.go using a mock database.
|
|
|
|
var out []structs.DatacenterMap
|
2015-10-15 23:07:16 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListDatacenters", struct{}{}, &out); err != nil {
|
2015-07-29 23:33:25 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(out) != 1 ||
|
|
|
|
out[0].Datacenter != "dc1" ||
|
|
|
|
len(out[0].Coordinates) != 1 ||
|
|
|
|
out[0].Coordinates[0].Node != s1.config.NodeName {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
c, err := s1.serfWAN.GetCoordinate()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
verifyCoordinatesEqual(t, c, out[0].Coordinates[0].Coord)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestCoordinate_ListNodes(t *testing.T) {
|
|
|
|
dir1, s1 := testServer(t)
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2015-10-15 23:07:16 +00:00
|
|
|
codec := rpcClient(t, s1)
|
|
|
|
defer codec.Close()
|
|
|
|
testutil.WaitForLeader(t, s1.RPC, "dc1")
|
2015-07-29 23:33:25 +00:00
|
|
|
|
2015-10-23 22:19:14 +00:00
|
|
|
// Register some nodes.
|
|
|
|
nodes := []string{"foo", "bar", "baz"}
|
|
|
|
for _, node := range nodes {
|
|
|
|
req := structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: node,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
var reply struct{}
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-12 19:58:31 +00:00
|
|
|
// Send coordinate updates for a few nodes.
|
2015-07-29 23:33:25 +00:00
|
|
|
arg1 := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Coord: generateRandomCoordinate(),
|
|
|
|
}
|
|
|
|
var out struct{}
|
2015-10-15 23:07:16 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg1, &out); err != nil {
|
2015-07-29 23:33:25 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
arg2 := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Coord: generateRandomCoordinate(),
|
|
|
|
}
|
2015-10-15 23:07:16 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out); err != nil {
|
2015-07-29 23:33:25 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
arg3 := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "baz",
|
|
|
|
Coord: generateRandomCoordinate(),
|
|
|
|
}
|
2015-10-15 23:07:16 +00:00
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil {
|
2015-07-29 23:33:25 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2015-07-30 18:31:35 +00:00
|
|
|
// Now query back for all the nodes.
|
2016-12-12 19:58:31 +00:00
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
arg := structs.DCSpecificRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
}
|
|
|
|
resp := structs.IndexedCoordinates{}
|
|
|
|
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if len(resp.Coordinates) != 3 ||
|
|
|
|
resp.Coordinates[0].Node != "bar" ||
|
|
|
|
resp.Coordinates[1].Node != "baz" ||
|
|
|
|
resp.Coordinates[2].Node != "foo" {
|
|
|
|
return false, fmt.Errorf("bad: %v", resp.Coordinates)
|
|
|
|
}
|
|
|
|
verifyCoordinatesEqual(t, resp.Coordinates[0].Coord, arg2.Coord) // bar
|
|
|
|
verifyCoordinatesEqual(t, resp.Coordinates[1].Coord, arg3.Coord) // baz
|
|
|
|
verifyCoordinatesEqual(t, resp.Coordinates[2].Coord, arg1.Coord) // foo
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) { t.Fatalf("err: %v", err) })
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestCoordinate_ListNodes_ACLFilter verifies ACL filtering on the
// Coordinate.ListNodes endpoint: with version 8 enforcement off, all
// coordinates are readable without a token; with enforcement on, results
// are filtered down to the nodes the caller's token can read.
func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		// Start with legacy behavior (no node-level filtering); the test
		// flips this on after the updates have been ingested.
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Register some nodes.
	nodes := []string{"foo", "bar", "baz"}
	for _, node := range nodes {
		req := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       node,
			Address:    "127.0.0.1",
			WriteRequest: structs.WriteRequest{
				Token: "root",
			},
		}
		var reply struct{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Send coordinate updates for a few nodes.
	arg1 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg1, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg2 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg3 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "baz",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for all the coordinate updates to apply. Since we aren't
	// enforcing version 8 ACLs, this should also allow us to read
	// everything back without a token.
	testutil.WaitForResult(func() (bool, error) {
		arg := structs.DCSpecificRequest{
			Datacenter: "dc1",
		}
		resp := structs.IndexedCoordinates{}
		if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(resp.Coordinates) == 3 {
			return true, nil
		}
		return false, fmt.Errorf("bad: %v", resp.Coordinates)
	}, func(err error) { t.Fatalf("err: %v", err) })

	// Now that we've waited for the batch processing to ingest the
	// coordinates we can do the rest of the requests without the loop. We
	// will start by turning on version 8 ACL support which should block
	// everything.
	s1.config.ACLEnforceVersion8 = true
	arg := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	resp := structs.IndexedCoordinates{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Filtering, not an error: the call succeeds but returns nothing.
	if len(resp.Coordinates) != 0 {
		t.Fatalf("bad: %#v", resp.Coordinates)
	}

	// Create an ACL that can read one of the nodes.
	var id string
	{
		req := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name: "User token",
				Type: structs.ACLTypeClient,
				Rules: `
node "foo" {
	policy = "read"
}
`,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &id); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// With the token, it should now go through, but only "foo" should be
	// visible since the token grants read on that node alone.
	arg.Token = id
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(resp.Coordinates) != 1 || resp.Coordinates[0].Node != "foo" {
		t.Fatalf("bad: %#v", resp.Coordinates)
	}
}
|