open-consul/agent/consul/internal_endpoint_test.go

package consul

import (
	"encoding/base64"
	"os"
	"strings"
	"testing"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/testrpc"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/stretchr/testify/require"
)
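
// TestInternal_NodeInfo verifies that Internal.NodeInfo dumps a single
// registered node along with its services and checks.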
func TestInternal_NodeInfo(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"master"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedNodeDump
	req := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       "foo",
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	nodes := out2.Dump
	if len(nodes) != 1 {
		t.Fatalf("Bad: %v", nodes)
	}
	if nodes[0].Node != "foo" {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if !lib.StrContains(nodes[0].Services[0].Tags, "master") {
		t.Fatalf("Bad: %v", nodes[0])
	}
	if nodes[0].Checks[0].Status != api.HealthPassing {
		t.Fatalf("Bad: %v", nodes[0])
	}
}
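
// TestInternal_NodeDump verifies that Internal.NodeDump returns every node in
// the datacenter with its services and checks.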
func TestInternal_NodeDump(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"master"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"slave"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthWarning,
			ServiceID: "db",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	var out2 structs.IndexedNodeDump
	req := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The two registered nodes plus the server's own node.
	nodes := out2.Dump
	if len(nodes) != 3 {
		t.Fatalf("Bad: %v", nodes)
	}
	var foundFoo, foundBar bool
	for _, node := range nodes {
		switch node.Node {
		case "foo":
			foundFoo = true
			if !lib.StrContains(node.Services[0].Tags, "master") {
				t.Fatalf("Bad: %v", node)
			}
			if node.Checks[0].Status != api.HealthPassing {
				t.Fatalf("Bad: %v", node)
			}
		case "bar":
			foundBar = true
			if !lib.StrContains(node.Services[0].Tags, "slave") {
				t.Fatalf("Bad: %v", node)
			}
			if node.Checks[0].Status != api.HealthWarning {
				t.Fatalf("Bad: %v", node)
			}
		}
	}
	if !foundFoo || !foundBar {
		t.Fatalf("missing foo or bar")
	}
}
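
// TestInternal_NodeDump_Filter verifies that Internal.NodeDump honors the
// filter expression passed in QueryOptions.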
func TestInternal_NodeDump_Filter(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"master"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthPassing,
			ServiceID: "db",
		},
	}
	var out struct{}
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	arg = structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.2",
		Service: &structs.NodeService{
			ID:      "db",
			Service: "db",
			Tags:    []string{"slave"},
		},
		Check: &structs.HealthCheck{
			Name:      "db connect",
			Status:    api.HealthWarning,
			ServiceID: "db",
		},
	}
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))

	var out2 structs.IndexedNodeDump
	req := structs.DCSpecificRequest{
		Datacenter:   "dc1",
		QueryOptions: structs.QueryOptions{Filter: "master in Services.Tags"},
	}
	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))

	nodes := out2.Dump
	require.Len(t, nodes, 1)
	require.Equal(t, "foo", nodes[0].Node)
}
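
// TestInternal_KeyringOperation verifies keyring list operations against the
// LAN and WAN gossip pools, both on a single server and across two
// WAN-joined datacenters.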
func TestInternal_KeyringOperation(t *testing.T) {
	t.Parallel()
	key1 := "H1dfkSZOVnP/JUnaBfTzXg=="
	keyBytes1, err := base64.StdEncoding.DecodeString(key1)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.SerfWANConfig.MemberlistConfig.SecretKey = keyBytes1
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	var out structs.KeyringResponses
	req := structs.KeyringRequest{
		Operation:  structs.KeyringList,
		Datacenter: "dc1",
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.KeyringOperation", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Two responses (local LAN/WAN pools) from a single-node cluster
	if len(out.Responses) != 2 {
		t.Fatalf("bad: %#v", out)
	}
	if _, ok := out.Responses[0].Keys[key1]; !ok {
		t.Fatalf("bad: %#v", out)
	}
	wanResp, lanResp := 0, 0
	for _, resp := range out.Responses {
		if resp.WAN {
			wanResp++
		} else {
			lanResp++
		}
	}
	if lanResp != 1 || wanResp != 1 {
		t.Fatalf("should have one lan and one wan response")
	}

	// Start a second agent to test cross-dc queries
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.SerfWANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.Datacenter = "dc2"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)

	var out2 structs.KeyringResponses
	req2 := structs.KeyringRequest{
		Operation: structs.KeyringList,
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.KeyringOperation", &req2, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Three responses (one from each DC's LAN, one from the shared WAN) in a
	// two-node cluster
	if len(out2.Responses) != 3 {
		t.Fatalf("bad: %#v", out2)
	}
	wanResp, lanResp = 0, 0
	for _, resp := range out2.Responses {
		if resp.WAN {
			wanResp++
		} else {
			lanResp++
		}
	}
	if lanResp != 2 || wanResp != 1 {
		t.Fatalf("should have two lan and one wan response")
	}
}
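
// TestInternal_KeyringOperationList_LocalOnly verifies that a keyring list
// with LocalOnly set queries only the local datacenter's LAN pool.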
func TestInternal_KeyringOperationList_LocalOnly(t *testing.T) {
	t.Parallel()
	key1 := "H1dfkSZOVnP/JUnaBfTzXg=="
	keyBytes1, err := base64.StdEncoding.DecodeString(key1)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.SerfWANConfig.MemberlistConfig.SecretKey = keyBytes1
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Start a second agent to test cross-dc queries
	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.SerfWANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.Datacenter = "dc2"
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)

	// --
	// Try request with `LocalOnly` set to true
	var out structs.KeyringResponses
	req := structs.KeyringRequest{
		Operation: structs.KeyringList,
		LocalOnly: true,
	}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.KeyringOperation", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// 1 response (from this DC LAN)
	if len(out.Responses) != 1 {
		t.Fatalf("expected num responses to be 1, got %d; out is: %#v", len(out.Responses), out)
	}
	wanResp, lanResp := 0, 0
	for _, resp := range out.Responses {
		if resp.WAN {
			wanResp++
		} else {
			lanResp++
		}
	}
	if lanResp != 1 || wanResp != 0 {
		t.Fatalf("should have 1 lan and 0 wan response, got (lan=%d) (wan=%d)", lanResp, wanResp)
	}

	// --
	// Try the same request again but with `LocalOnly` set to false
	req.LocalOnly = false
	if err := msgpackrpc.CallWithCodec(codec, "Internal.KeyringOperation", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// 3 responses (one from each DC LAN, one from WAN)
	if len(out.Responses) != 3 {
		t.Fatalf("expected num responses to be 3, got %d; out is: %#v", len(out.Responses), out)
	}
	wanResp, lanResp = 0, 0
	for _, resp := range out.Responses {
		if resp.WAN {
			wanResp++
		} else {
			lanResp++
		}
	}
	if lanResp != 2 || wanResp != 1 {
		t.Fatalf("should have 2 lan and 1 wan response, got (lan=%d) (wan=%d)", lanResp, wanResp)
	}
}
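
// TestInternal_KeyringOperationWrite_LocalOnly verifies that write operations
// (here, a key removal) reject the LocalOnly flag.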
func TestInternal_KeyringOperationWrite_LocalOnly(t *testing.T) {
	t.Parallel()
	key1 := "H1dfkSZOVnP/JUnaBfTzXg=="
	keyBytes1, err := base64.StdEncoding.DecodeString(key1)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.SerfLANConfig.MemberlistConfig.SecretKey = keyBytes1
		c.SerfWANConfig.MemberlistConfig.SecretKey = keyBytes1
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Try request with `LocalOnly` set to true
	var out structs.KeyringResponses
	req := structs.KeyringRequest{
		Operation: structs.KeyringRemove,
		LocalOnly: true,
	}
	err = msgpackrpc.CallWithCodec(codec, "Internal.KeyringOperation", &req, &out)
	if err == nil {
		t.Fatalf("expected an error")
	}
	if !strings.Contains(err.Error(), "LocalOnly") {
		t.Fatalf("expected error to contain string 'LocalOnly'. Got: %v", err)
	}
}
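
// TestInternal_NodeInfo_FilterACL verifies that Internal.NodeInfo filters out
// services and checks the token is not authorized to read.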
func TestInternal_NodeInfo_FilterACL(t *testing.T) {
	t.Parallel()
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.NodeSpecificRequest{
		Datacenter:   "dc1",
		Node:         srv.config.NodeName,
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedNodeDump{}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	for _, info := range reply.Dump {
		found := false
		for _, chk := range info.Checks {
			if chk.ServiceName == "foo" {
				found = true
			}
			if chk.ServiceName == "bar" {
				t.Fatalf("bad: %#v", info.Checks)
			}
		}
		if !found {
			t.Fatalf("bad: %#v", info.Checks)
		}

		found = false
		for _, svc := range info.Services {
			if svc.Service == "foo" {
				found = true
			}
			if svc.Service == "bar" {
				t.Fatalf("bad: %#v", info.Services)
			}
		}
		if !found {
			t.Fatalf("bad: %#v", info.Services)
		}
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
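
// TestInternal_NodeDump_FilterACL verifies that Internal.NodeDump filters out
// services and checks the token is not authorized to read.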
func TestInternal_NodeDump_FilterACL(t *testing.T) {
	t.Parallel()
	dir, token, srv, codec := testACLFilterServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer codec.Close()

	opt := structs.DCSpecificRequest{
		Datacenter:   "dc1",
		QueryOptions: structs.QueryOptions{Token: token},
	}
	reply := structs.IndexedNodeDump{}
	if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &opt, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	for _, info := range reply.Dump {
		found := false
		for _, chk := range info.Checks {
			if chk.ServiceName == "foo" {
				found = true
			}
			if chk.ServiceName == "bar" {
				t.Fatalf("bad: %#v", info.Checks)
			}
		}
		if !found {
			t.Fatalf("bad: %#v", info.Checks)
		}

		found = false
		for _, svc := range info.Services {
			if svc.Service == "foo" {
				found = true
			}
			if svc.Service == "bar" {
				t.Fatalf("bad: %#v", info.Services)
			}
		}
		if !found {
			t.Fatalf("bad: %#v", info.Services)
		}
	}

	// We've already proven that we call the ACL filtering function so we
	// test node filtering down in acl.go for node cases. This also proves
	// that we respect the version 8 ACL flag, since the test server sets
	// that to false (the regression value of *not* changing this is better
	// for now until we change the sense of the version 8 ACL flag).
}
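
// TestInternal_EventFire_Token verifies that firing a user event requires an
// authorized token when ACLs default to deny.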
func TestInternal_EventFire_Token(t *testing.T) {
	t.Parallel()
	dir, srv := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLMasterToken = "root"
		c.ACLDownPolicy = "deny"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	defer codec.Close()

	testrpc.WaitForLeader(t, srv.RPC, "dc1")

	// No token is rejected
	event := structs.EventFireRequest{
		Name:       "foo",
		Datacenter: "dc1",
		Payload:    []byte("nope"),
	}
	err := msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil)
	if !acl.IsErrPermissionDenied(err) {
		t.Fatalf("bad: %s", err)
	}

	// Root token is allowed to fire
	event.Token = "root"
	err = msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}
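
// TestInternal_ServiceDump verifies Internal.ServiceDump, both unfiltered and
// with filter expressions over node names and service metadata.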
func TestInternal_ServiceDump(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// prep the cluster with some data we can use in our filters
	registerTestCatalogEntries(t, codec)

	doRequest := func(t *testing.T, filter string) structs.CheckServiceNodes {
		t.Helper()
		args := structs.DCSpecificRequest{
			Datacenter:   "dc1",
			QueryOptions: structs.QueryOptions{Filter: filter},
		}
		var out structs.IndexedCheckServiceNodes
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out))
		return out.Nodes
	}

	// Run the tests against the test server
	t.Run("No Filter", func(t *testing.T) {
		nodes := doRequest(t, "")
		// redis (3), web (3), critical (1), warning (1) and consul (1)
		require.Len(t, nodes, 9)
	})

	t.Run("Filter Node foo and service version 1", func(t *testing.T) {
		nodes := doRequest(t, "Node.Node == foo and Service.Meta.version == 1")
		require.Len(t, nodes, 1)
		require.Equal(t, "redis", nodes[0].Service.Service)
		require.Equal(t, "redisV1", nodes[0].Service.ID)
	})

	t.Run("Filter service web", func(t *testing.T) {
		nodes := doRequest(t, "Service.Service == web")
		require.Len(t, nodes, 3)
	})
}
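
// TestInternal_ServiceDump_Kind verifies that Internal.ServiceDump can be
// restricted to a single service kind via ServiceDumpRequest.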
func TestInternal_ServiceDump_Kind(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// prep the cluster with some data we can use in our filters
	registerTestCatalogEntries(t, codec)
	registerTestCatalogProxyEntries(t, codec)

	doRequest := func(t *testing.T, kind structs.ServiceKind) structs.CheckServiceNodes {
		t.Helper()
		args := structs.ServiceDumpRequest{
			Datacenter:     "dc1",
			ServiceKind:    kind,
			UseServiceKind: true,
		}
		var out structs.IndexedCheckServiceNodes
		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out))
		return out.Nodes
	}

	// Run the tests against the test server
	t.Run("Typical", func(t *testing.T) {
		nodes := doRequest(t, structs.ServiceKindTypical)
		// redis (3), web (3), critical (1), warning (1) and consul (1)
		require.Len(t, nodes, 9)
	})

	t.Run("Terminating Gateway", func(t *testing.T) {
		nodes := doRequest(t, structs.ServiceKindTerminatingGateway)
		require.Len(t, nodes, 1)
		require.Equal(t, "tg-gw", nodes[0].Service.Service)
		require.Equal(t, "tg-gw-01", nodes[0].Service.ID)
	})

	t.Run("Mesh Gateway", func(t *testing.T) {
		nodes := doRequest(t, structs.ServiceKindMeshGateway)
		require.Len(t, nodes, 1)
		require.Equal(t, "mg-gw", nodes[0].Service.Service)
		require.Equal(t, "mg-gw-01", nodes[0].Service.ID)
	})

	t.Run("Connect Proxy", func(t *testing.T) {
		nodes := doRequest(t, structs.ServiceKindConnectProxy)
		require.Len(t, nodes, 1)
		require.Equal(t, "web-proxy", nodes[0].Service.Service)
		require.Equal(t, "web-proxy", nodes[0].Service.ID)
	})
}