package consul

import (
	"os"
Creates new "prepared-query" ACL type and new token capture behavior.
Prior to this change, prepared queries had the following behavior for
ACLs, which will need to change to support templates:
1. A management token, or a token with read access to the service being
queried needed to be provided in order to create a prepared query.
2. The token used to create the prepared query was stored with the query
in the state store and used to execute the query.
3. A management token, or the token used to create the query needed to be
supplied to perform and CRUD operations on an existing prepared query.
This was pretty subtle and complicated behavior, and won't work for
templates since the service name is computed at execution time. To solve
this, we introduce a new "prepared-query" ACL type, where the prefix
applies to the query name for static prepared query types and to the
prefix for template prepared query types.
With this change, the new behavior is:
1. A management token, or a token with "prepared-query" write access to
the query name or (soon) the given template prefix is required to do
any CRUD operations on a prepared query, or to list prepared queries
(the list is filtered by this ACL).
2. You will no longer need a management token to list prepared queries,
but you will only be able to see prepared queries that you have access
to (you get an empty list instead of permission denied).
3. When listing or getting a query, because it was easy to capture
management tokens given the past behavior, this will always blank out
the "Token" field (replacing the contents as <hidden>) for all tokens
unless a management token is supplied. Going forward, we should
discourage people from binding tokens for execution unless strictly
necessary.
4. No token will be captured by default when a prepared query is created.
If the user wishes to supply an execution token then can pass it in via
the "Token" field in the prepared query definition. Otherwise, this
field will default to empty.
5. At execution time, we will use the captured token if it exists with the
prepared query definition, otherwise we will use the token that's passed
in with the request, just like we do for other RPCs (or you can use the
agent's configured token for DNS).
6. Prepared queries with no name (accessible only by ID) will not require
ACLs to create or modify (execution time will depend on the service ACL
configuration). Our argument here is that these are designed to be
ephemeral and the IDs are as good as an ACL. Management tokens will be
able to list all of these.
These changes enable templates, but also enable delegation of authority to
manage the prepared query namespace.
2016-02-23 08:12:58 +00:00
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/consul/structs"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/testutil/retry"
)
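
// testACLPolicy is the rule set used for the client tokens created in these
// tests: KV access is denied by default and granted write (which includes
// read) under the "foo/" prefix.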
var testACLPolicy = `
key "" {
	policy = "deny"
}
key "foo/" {
	policy = "write"
}
`
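
// TestACL_Disabled verifies that token resolution returns a nil ACL and no
// error when no ACL datacenter is configured (ACLs disabled).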
func TestACL_Disabled(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	acl, err := s1.resolveToken("does not exist")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl != nil {
		t.Fatalf("got acl")
	}
}
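
// TestACL_ResolveRootACL verifies that the reserved root token names "allow"
// and "deny" cannot be resolved and always produce the rootDenied error.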
func TestACL_ResolveRootACL(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	acl, err := s1.resolveToken("allow")
	if err == nil || err.Error() != rootDenied {
		t.Fatalf("err: %v", err)
	}
	if acl != nil {
		t.Fatalf("bad: %v", acl)
	}

	acl, err = s1.resolveToken("deny")
	if err == nil || err.Error() != rootDenied {
		t.Fatalf("err: %v", err)
	}
	if acl != nil {
		t.Fatalf("bad: %v", acl)
	}
}

func TestACL_Authority_NotFound(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	acl, err := s1.resolveToken("does not exist")
	if err == nil || err.Error() != aclNotFound {
		t.Fatalf("err: %v", err)
	}
	if acl != nil {
		t.Fatalf("got acl")
	}
}

func TestACL_Authority_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Resolve the token
	acl, err := s1.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}

func TestACL_Authority_Anonymous_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Resolve the token
	acl, err := s1.resolveToken("")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}

func TestACL_Authority_Master_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "foobar"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Resolve the token
	acl, err := s1.resolveToken("foobar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}

func TestACL_Authority_Management(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "foobar"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Resolve the token
	acl, err := s1.resolveToken("foobar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
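
// The NonAuthority tests below run a two-server cluster and resolve tokens on
// whichever server is not the leader, so resolution cannot be answered purely
// from that server's local state.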
func TestACL_NonAuthority_NotFound(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.Bootstrap = false     // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	client := rpcClient(t, s1)
	defer client.Close()
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// find the non-authoritative server
	var nonAuth *Server
	if !s1.IsLeader() {
		nonAuth = s1
	} else {
		nonAuth = s2
	}

	acl, err := nonAuth.resolveToken("does not exist")
	if err == nil || err.Error() != aclNotFound {
		t.Fatalf("err: %v", err)
	}
	if acl != nil {
		t.Fatalf("got acl")
	}
}

func TestACL_NonAuthority_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.Bootstrap = false     // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// find the non-authoritative server
	var nonAuth *Server
	if !s1.IsLeader() {
		nonAuth = s1
	} else {
		nonAuth = s2
	}

	// Token should resolve
	acl, err := nonAuth.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}

func TestACL_NonAuthority_Management(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "foobar"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLDefaultPolicy = "deny"
		c.Bootstrap = false // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// find the non-authoritative server
	var nonAuth *Server
	if !s1.IsLeader() {
		nonAuth = s1
	} else {
		nonAuth = s2
	}

	// Resolve the token
	acl, err := nonAuth.resolveToken("foobar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
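
// TestACL_DownPolicy_Deny verifies that when the authoritative ACL server
// becomes unreachable and the down policy is "deny", token resolution falls
// back to acl.DenyAll().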
func TestACL_DownPolicy_Deny(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLDownPolicy = "deny"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLDownPolicy = "deny"
		c.Bootstrap = false // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// find the non-authoritative server
	var nonAuth *Server
	var auth *Server
	if !s1.IsLeader() {
		nonAuth = s1
		auth = s2
	} else {
		nonAuth = s2
		auth = s1
	}

	// Kill the authoritative server
	auth.Shutdown()

	// Token should resolve into a DenyAll
	aclR, err := nonAuth.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if aclR != acl.DenyAll() {
		t.Fatalf("bad acl: %#v", aclR)
	}
}

func TestACL_DownPolicy_Allow(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLDownPolicy = "allow"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLDownPolicy = "allow"
		c.Bootstrap = false // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// find the non-authoritative server
	var nonAuth *Server
	var auth *Server
	if !s1.IsLeader() {
		nonAuth = s1
		auth = s2
	} else {
		nonAuth = s2
		auth = s1
	}

	// Kill the authoritative server
	auth.Shutdown()

	// Token should resolve into an AllowAll
	aclR, err := nonAuth.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if aclR != acl.AllowAll() {
		t.Fatalf("bad acl: %#v", aclR)
	}
}
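
// TestACL_DownPolicy_ExtendCache verifies that with the "extend-cache" down
// policy, a token that was resolved (and cached) before the authoritative
// server went away continues to resolve to the cached ACL afterwards.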
func TestACL_DownPolicy_ExtendCache(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLTTL = 0
		c.ACLDownPolicy = "extend-cache"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLTTL = 0
		c.ACLDownPolicy = "extend-cache"
		c.Bootstrap = false // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinLAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s1, 2)) })

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// find the non-authoritative server
	var nonAuth *Server
	var auth *Server
	if !s1.IsLeader() {
		nonAuth = s1
		auth = s2
	} else {
		nonAuth = s2
		auth = s1
	}

	// Warm the caches
	aclR, err := nonAuth.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if aclR == nil {
		t.Fatalf("bad acl: %#v", aclR)
	}

	// Kill the authoritative server
	auth.Shutdown()

	// Token should resolve into cached copy
	aclR2, err := nonAuth.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if aclR2 != aclR {
		t.Fatalf("bad acl: %#v", aclR)
	}
}
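
// TestACL_Replication verifies how ACL replication interacts with the down
// policy: after the ACL datacenter is gone, a replica with "extend-cache" can
// still resolve the token, while a replica with a "deny" down policy falls
// back to deny-all even though the token has been replicated.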
func TestACL_Replication(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.ACLDatacenter = "dc1"
		c.ACLDefaultPolicy = "deny"
		c.ACLDownPolicy = "extend-cache"
		c.ACLReplicationToken = "root"
		c.ACLReplicationInterval = 10 * time.Millisecond
		c.ACLReplicationApplyLimit = 1000000
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc3"
		c.ACLDatacenter = "dc1"
		c.ACLDownPolicy = "deny"
		c.ACLReplicationToken = "root"
		c.ACLReplicationInterval = 10 * time.Millisecond
		c.ACLReplicationApplyLimit = 1000000
	})
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Try to join.
	joinWAN(t, s2, s1)
	joinWAN(t, s3, s1)
	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2")
	testrpc.WaitForLeader(t, s1.RPC, "dc3")

	// Create a new token.
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for replication to occur.
	retry.Run(t, func(r *retry.R) {
		_, acl, err := s2.fsm.State().ACLGet(nil, id)
		if err != nil {
			r.Fatal(err)
		}
		if acl == nil {
			r.Fatal(nil)
		}
		_, acl, err = s3.fsm.State().ACLGet(nil, id)
		if err != nil {
			r.Fatal(err)
		}
		if acl == nil {
			r.Fatal(nil)
		}
	})

	// Kill the ACL datacenter.
	s1.Shutdown()

	// Token should resolve on s2, which has replication + extend-cache.
	acl, err := s2.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}

	// Although s3 has replication, and we verified that the ACL is there,
	// it can not be used because of the down policy.
	acl, err = s3.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy.
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if acl.KeyRead("foo/test") {
		t.Fatalf("unexpected read")
	}
}

func TestACL_MultiDC_Found(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.ACLDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	joinWAN(t, s2, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	testrpc.WaitForLeader(t, s1.RPC, "dc2")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Token should resolve
	acl, err := s2.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
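
// The filter tests below exercise the aclFilter helper directly. The final
// boolean argument to newACLFilter controls "version 8" ACL enforcement,
// which layers node-level ACL checks on top of the service-level ones.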
func TestACL_filterHealthChecks(t *testing.T) {
	t.Parallel()
	// Create some health checks.
	fill := func() structs.HealthChecks {
		return structs.HealthChecks{
			&structs.HealthCheck{
				Node:        "node1",
				CheckID:     "check1",
				ServiceName: "foo",
			},
		}
	}

	// Try permissive filtering.
	{
		hc := fill()
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterHealthChecks(&hc)
		if len(hc) != 1 {
			t.Fatalf("bad: %#v", hc)
		}
	}

	// Try restrictive filtering.
	{
		hc := fill()
		filt := newACLFilter(acl.DenyAll(), nil, false)
		filt.filterHealthChecks(&hc)
		if len(hc) != 0 {
			t.Fatalf("bad: %#v", hc)
		}
	}

	// Allowed to see the service but not the node.
	policy, err := acl.Parse(`
service "foo" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// This will work because version 8 ACLs aren't being enforced.
	{
		hc := fill()
		filt := newACLFilter(perms, nil, false)
		filt.filterHealthChecks(&hc)
		if len(hc) != 1 {
			t.Fatalf("bad: %#v", hc)
		}
	}

	// But with version 8 the node will block it.
	{
		hc := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterHealthChecks(&hc)
		if len(hc) != 0 {
			t.Fatalf("bad: %#v", hc)
		}
	}

	// Chain on access to the node.
	policy, err = acl.Parse(`
node "node1" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err = acl.New(perms, policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now it should go through.
	{
		hc := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterHealthChecks(&hc)
		if len(hc) != 1 {
			t.Fatalf("bad: %#v", hc)
		}
	}
}

func TestACL_filterServices(t *testing.T) {
	t.Parallel()
	// Create some services
	services := structs.Services{
		"service1": []string{},
		"service2": []string{},
		"consul":   []string{},
	}

	// Try permissive filtering.
	filt := newACLFilter(acl.AllowAll(), nil, false)
	filt.filterServices(services)
	if len(services) != 3 {
		t.Fatalf("bad: %#v", services)
	}

	// Try restrictive filtering.
	filt = newACLFilter(acl.DenyAll(), nil, false)
	filt.filterServices(services)
	if len(services) != 1 {
		t.Fatalf("bad: %#v", services)
	}
	if _, ok := services["consul"]; !ok {
		t.Fatalf("bad: %#v", services)
	}

	// Try restrictive filtering with version 8 enforcement.
	filt = newACLFilter(acl.DenyAll(), nil, true)
	filt.filterServices(services)
	if len(services) != 0 {
		t.Fatalf("bad: %#v", services)
	}
}

func TestACL_filterServiceNodes(t *testing.T) {
	t.Parallel()
	// Create some service nodes.
	fill := func() structs.ServiceNodes {
		return structs.ServiceNodes{
			&structs.ServiceNode{
				Node:        "node1",
				ServiceName: "foo",
			},
		}
	}

	// Try permissive filtering.
	{
		nodes := fill()
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// Try restrictive filtering.
	{
		nodes := fill()
		filt := newACLFilter(acl.DenyAll(), nil, false)
		filt.filterServiceNodes(&nodes)
		if len(nodes) != 0 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// Allowed to see the service but not the node.
	policy, err := acl.Parse(`
service "foo" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// This will work because version 8 ACLs aren't being enforced.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, false)
		filt.filterServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// But with version 8 the node will block it.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterServiceNodes(&nodes)
		if len(nodes) != 0 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// Chain on access to the node.
	policy, err = acl.Parse(`
node "node1" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err = acl.New(perms, policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now it should go through.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
	}
}

func TestACL_filterNodeServices(t *testing.T) {
	t.Parallel()
	// Create some node services.
	fill := func() *structs.NodeServices {
		return &structs.NodeServices{
			Node: &structs.Node{
				Node: "node1",
			},
			Services: map[string]*structs.NodeService{
				"foo": &structs.NodeService{
					ID:      "foo",
					Service: "foo",
				},
			},
		}
	}

	// Try nil, which is a possible input.
	{
		var services *structs.NodeServices
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterNodeServices(&services)
		if services != nil {
			t.Fatalf("bad: %#v", services)
		}
	}

	// Try permissive filtering.
	{
		services := fill()
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterNodeServices(&services)
		if len(services.Services) != 1 {
			t.Fatalf("bad: %#v", services.Services)
		}
	}

	// Try restrictive filtering.
	{
		services := fill()
		filt := newACLFilter(acl.DenyAll(), nil, false)
		filt.filterNodeServices(&services)
		if len((*services).Services) != 0 {
			t.Fatalf("bad: %#v", (*services).Services)
		}
	}

	// Allowed to see the service but not the node.
	policy, err := acl.Parse(`
service "foo" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// This will work because version 8 ACLs aren't being enforced.
	{
		services := fill()
		filt := newACLFilter(perms, nil, false)
		filt.filterNodeServices(&services)
		if len((*services).Services) != 1 {
			t.Fatalf("bad: %#v", (*services).Services)
		}
	}

	// But with version 8 the node will block it.
	{
		services := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterNodeServices(&services)
		if services != nil {
			t.Fatalf("bad: %#v", services)
		}
	}

	// Chain on access to the node.
	policy, err = acl.Parse(`
node "node1" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err = acl.New(perms, policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now it should go through.
	{
		services := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterNodeServices(&services)
		if len((*services).Services) != 1 {
			t.Fatalf("bad: %#v", (*services).Services)
		}
	}
}

func TestACL_filterCheckServiceNodes(t *testing.T) {
	t.Parallel()
	// Create some nodes.
	fill := func() structs.CheckServiceNodes {
		return structs.CheckServiceNodes{
			structs.CheckServiceNode{
				Node: &structs.Node{
					Node: "node1",
				},
				Service: &structs.NodeService{
					ID:      "foo",
					Service: "foo",
				},
				Checks: structs.HealthChecks{
					&structs.HealthCheck{
						Node:        "node1",
						CheckID:     "check1",
						ServiceName: "foo",
					},
				},
			},
		}
	}

	// Try permissive filtering.
	{
		nodes := fill()
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterCheckServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
		if len(nodes[0].Checks) != 1 {
			t.Fatalf("bad: %#v", nodes[0].Checks)
		}
	}

	// Try restrictive filtering.
	{
		nodes := fill()
		filt := newACLFilter(acl.DenyAll(), nil, false)
		filt.filterCheckServiceNodes(&nodes)
		if len(nodes) != 0 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// Allowed to see the service but not the node.
	policy, err := acl.Parse(`
service "foo" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// This will work because version 8 ACLs aren't being enforced.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, false)
		filt.filterCheckServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
		if len(nodes[0].Checks) != 1 {
			t.Fatalf("bad: %#v", nodes[0].Checks)
		}
	}

	// But with version 8 the node will block it.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterCheckServiceNodes(&nodes)
		if len(nodes) != 0 {
			t.Fatalf("bad: %#v", nodes)
		}
	}

	// Chain on access to the node.
	policy, err = acl.Parse(`
node "node1" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err = acl.New(perms, policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now it should go through.
	{
		nodes := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterCheckServiceNodes(&nodes)
		if len(nodes) != 1 {
			t.Fatalf("bad: %#v", nodes)
		}
		if len(nodes[0].Checks) != 1 {
			t.Fatalf("bad: %#v", nodes[0].Checks)
		}
	}
}
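
// Coordinates carry only node names, so filtering them is a no-op unless
// version 8 (node-level) ACL enforcement is turned on.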
func TestACL_filterCoordinates(t *testing.T) {
	t.Parallel()
	// Create some coordinates.
	coords := structs.Coordinates{
		&structs.Coordinate{
			Node:  "node1",
			Coord: generateRandomCoordinate(),
		},
		&structs.Coordinate{
			Node:  "node2",
			Coord: generateRandomCoordinate(),
		},
	}

	// Try permissive filtering.
	filt := newACLFilter(acl.AllowAll(), nil, false)
	filt.filterCoordinates(&coords)
	if len(coords) != 2 {
		t.Fatalf("bad: %#v", coords)
	}

	// Try restrictive filtering without version 8 ACL enforcement.
	filt = newACLFilter(acl.DenyAll(), nil, false)
	filt.filterCoordinates(&coords)
	if len(coords) != 2 {
		t.Fatalf("bad: %#v", coords)
	}

	// Try restrictive filtering with version 8 ACL enforcement.
	filt = newACLFilter(acl.DenyAll(), nil, true)
	filt.filterCoordinates(&coords)
	if len(coords) != 0 {
		t.Fatalf("bad: %#v", coords)
	}
}

func TestACL_filterSessions(t *testing.T) {
	t.Parallel()
	// Create a session list.
	sessions := structs.Sessions{
		&structs.Session{
			Node: "foo",
		},
		&structs.Session{
			Node: "bar",
		},
	}

	// Try permissive filtering.
	filt := newACLFilter(acl.AllowAll(), nil, true)
	filt.filterSessions(&sessions)
	if len(sessions) != 2 {
		t.Fatalf("bad: %#v", sessions)
	}

	// Try restrictive filtering but with version 8 enforcement turned off.
	filt = newACLFilter(acl.DenyAll(), nil, false)
	filt.filterSessions(&sessions)
	if len(sessions) != 2 {
		t.Fatalf("bad: %#v", sessions)
	}

	// Try restrictive filtering with version 8 enforcement turned on.
	filt = newACLFilter(acl.DenyAll(), nil, true)
	filt.filterSessions(&sessions)
	if len(sessions) != 0 {
		t.Fatalf("bad: %#v", sessions)
	}
}

func TestACL_filterNodeDump(t *testing.T) {
	t.Parallel()
	// Create a node dump.
	fill := func() structs.NodeDump {
		return structs.NodeDump{
			&structs.NodeInfo{
				Node: "node1",
				Services: []*structs.NodeService{
					&structs.NodeService{
						ID:      "foo",
						Service: "foo",
					},
				},
				Checks: []*structs.HealthCheck{
					&structs.HealthCheck{
						Node:        "node1",
						CheckID:     "check1",
						ServiceName: "foo",
					},
				},
			},
		}
	}

	// Try permissive filtering.
	{
		dump := fill()
		filt := newACLFilter(acl.AllowAll(), nil, false)
		filt.filterNodeDump(&dump)
		if len(dump) != 1 {
			t.Fatalf("bad: %#v", dump)
		}
		if len(dump[0].Services) != 1 {
			t.Fatalf("bad: %#v", dump[0].Services)
		}
		if len(dump[0].Checks) != 1 {
			t.Fatalf("bad: %#v", dump[0].Checks)
		}
	}

	// Try restrictive filtering.
	{
		dump := fill()
		filt := newACLFilter(acl.DenyAll(), nil, false)
		filt.filterNodeDump(&dump)
		if len(dump) != 1 {
			t.Fatalf("bad: %#v", dump)
		}
		if len(dump[0].Services) != 0 {
			t.Fatalf("bad: %#v", dump[0].Services)
		}
		if len(dump[0].Checks) != 0 {
			t.Fatalf("bad: %#v", dump[0].Checks)
		}
	}

	// Allowed to see the service but not the node.
	policy, err := acl.Parse(`
service "foo" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// This will work because version 8 ACLs aren't being enforced.
	{
		dump := fill()
		filt := newACLFilter(perms, nil, false)
		filt.filterNodeDump(&dump)
		if len(dump) != 1 {
			t.Fatalf("bad: %#v", dump)
		}
		if len(dump[0].Services) != 1 {
			t.Fatalf("bad: %#v", dump[0].Services)
		}
		if len(dump[0].Checks) != 1 {
			t.Fatalf("bad: %#v", dump[0].Checks)
		}
	}

	// But with version 8 the node will block it.
	{
		dump := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterNodeDump(&dump)
		if len(dump) != 0 {
			t.Fatalf("bad: %#v", dump)
		}
	}

	// Chain on access to the node.
	policy, err = acl.Parse(`
node "node1" {
  policy = "read"
}
`)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	perms, err = acl.New(perms, policy)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now it should go through.
	{
		dump := fill()
		filt := newACLFilter(perms, nil, true)
		filt.filterNodeDump(&dump)
		if len(dump) != 1 {
			t.Fatalf("bad: %#v", dump)
		}
		if len(dump[0].Services) != 1 {
			t.Fatalf("bad: %#v", dump[0].Services)
		}
		if len(dump[0].Checks) != 1 {
			t.Fatalf("bad: %#v", dump[0].Checks)
		}
	}
}

func TestACL_filterNodes(t *testing.T) {
	t.Parallel()
	// Create a nodes list.
	nodes := structs.Nodes{
		&structs.Node{
			Node: "foo",
		},
		&structs.Node{
			Node: "bar",
		},
	}

	// Try permissive filtering.
	filt := newACLFilter(acl.AllowAll(), nil, true)
	filt.filterNodes(&nodes)
	if len(nodes) != 2 {
		t.Fatalf("bad: %#v", nodes)
	}

	// Try restrictive filtering but with version 8 enforcement turned off.
	filt = newACLFilter(acl.DenyAll(), nil, false)
	filt.filterNodes(&nodes)
	if len(nodes) != 2 {
		t.Fatalf("bad: %#v", nodes)
	}

	// Try restrictive filtering with version 8 enforcement turned on.
	filt = newACLFilter(acl.DenyAll(), nil, true)
	filt.filterNodes(&nodes)
	if len(nodes) != 0 {
		t.Fatalf("bad: %#v", nodes)
	}
}
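
// TestACL_redactPreparedQueryTokens verifies that the token embedded in a
// prepared query is left intact for a management token but replaced with the
// redacted placeholder for any other token, without mutating the original
// query object.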
func TestACL_redactPreparedQueryTokens(t *testing.T) {
	t.Parallel()
	query := &structs.PreparedQuery{
		ID:    "f004177f-2c28-83b7-4229-eacc25fe55d1",
		Token: "root",
	}

	expected := &structs.PreparedQuery{
		ID:    "f004177f-2c28-83b7-4229-eacc25fe55d1",
		Token: "root",
	}

	// Try permissive filtering with a management token. This will allow the
	// embedded token to be seen.
	filt := newACLFilter(acl.ManageAll(), nil, false)
	filt.redactPreparedQueryTokens(&query)
	if !reflect.DeepEqual(query, expected) {
		t.Fatalf("bad: %#v", &query)
	}

	// Hang on to the entry with a token, which needs to survive the next
	// operation.
	original := query

	// Now try permissive filtering with a client token, which should cause
	// the embedded token to get redacted.
	filt = newACLFilter(acl.AllowAll(), nil, false)
	filt.redactPreparedQueryTokens(&query)
	expected.Token = redactedToken
	if !reflect.DeepEqual(query, expected) {
		t.Fatalf("bad: %#v", *query)
	}

	// Make sure that the original object didn't lose its token.
	if original.Token != "root" {
		t.Fatalf("bad token: %s", original.Token)
	}
}
|
|
|
|
|
2016-02-23 08:12:58 +00:00
|
|
|
func TestACL_filterPreparedQueries(t *testing.T) {
|
2017-05-22 22:14:27 +00:00
|
|
|
t.Parallel()
|
2016-02-23 08:12:58 +00:00
|
|
|
queries := structs.PreparedQueries{
|
|
|
|
&structs.PreparedQuery{
|
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d1",
|
|
|
|
},
|
|
|
|
&structs.PreparedQuery{
|
2016-02-24 09:26:16 +00:00
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d2",
|
|
|
|
Name: "query-with-no-token",
|
|
|
|
},
|
|
|
|
&structs.PreparedQuery{
|
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d3",
|
|
|
|
Name: "query-with-a-token",
|
2016-02-23 08:12:58 +00:00
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
expected := structs.PreparedQueries{
|
|
|
|
&structs.PreparedQuery{
|
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d1",
|
|
|
|
},
|
|
|
|
&structs.PreparedQuery{
|
2016-02-24 09:26:16 +00:00
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d2",
|
|
|
|
Name: "query-with-no-token",
|
|
|
|
},
|
|
|
|
&structs.PreparedQuery{
|
|
|
|
ID: "f004177f-2c28-83b7-4229-eacc25fe55d3",
|
|
|
|
Name: "query-with-a-token",
|
2016-02-23 08:12:58 +00:00
|
|
|
Token: "root",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try permissive filtering with a management token. This will allow the
|
2016-02-24 09:26:16 +00:00
|
|
|
// embedded token to be seen.
|
2017-04-21 00:02:42 +00:00
|
|
|
filt := newACLFilter(acl.ManageAll(), nil, false)
|
2016-02-23 08:12:58 +00:00
|
|
|
filt.filterPreparedQueries(&queries)
|
|
|
|
if !reflect.DeepEqual(queries, expected) {
|
|
|
|
t.Fatalf("bad: %#v", queries)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Hang on to the entry with a token, which needs to survive the next
|
|
|
|
// operation.
|
2016-02-24 09:26:16 +00:00
|
|
|
original := queries[2]
|
2016-02-23 08:12:58 +00:00
|
|
|
|
|
|
|
// Now try permissive filtering with a client token, which should cause
|
2016-02-24 09:26:16 +00:00
|
|
|
// the embedded token to get redacted, and the query with no name to get
|
|
|
|
// filtered out.
|
2017-04-21 00:02:42 +00:00
|
|
|
filt = newACLFilter(acl.AllowAll(), nil, false)
|
2016-02-23 08:12:58 +00:00
|
|
|
filt.filterPreparedQueries(&queries)
|
2016-02-24 09:26:16 +00:00
|
|
|
expected[2].Token = redactedToken
|
|
|
|
expected = append(structs.PreparedQueries{}, expected[1], expected[2])
|
2016-02-23 08:12:58 +00:00
|
|
|
if !reflect.DeepEqual(queries, expected) {
|
|
|
|
t.Fatalf("bad: %#v", queries)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that the original object didn't lose its token.
|
|
|
|
if original.Token != "root" {
|
|
|
|
t.Fatalf("bad token: %s", original.Token)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now try restrictive filtering.
|
2017-04-21 00:02:42 +00:00
|
|
|
filt = newACLFilter(acl.DenyAll(), nil, false)
|
2016-02-23 08:12:58 +00:00
|
|
|
filt.filterPreparedQueries(&queries)
|
|
|
|
if len(queries) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", queries)
|
|
|
|
}
|
|
|
|
}
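
// A minimal sketch (hypothetical function name, same package assumptions
// as the test above) of how a non-management ACL both redacts a captured
// token and filters the prepared query list.
func exampleFilterPreparedQueriesSketch() structs.PreparedQueries {
	queries := structs.PreparedQueries{
		&structs.PreparedQuery{
			ID:    "f004177f-2c28-83b7-4229-eacc25fe55d3",
			Name:  "query-with-a-token",
			Token: "root",
		},
	}
	// An allow-all (but non-management) ACL keeps named queries the caller
	// can read and replaces the captured token with redactedToken, per the
	// expectations asserted above.
	filt := newACLFilter(acl.AllowAll(), nil, false)
	filt.filterPreparedQueries(&queries)
	return queries
}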
|
|
|
|
|
2015-06-11 23:46:15 +00:00
|
|
|
func TestACL_unhandledFilterType(t *testing.T) {
|
2017-05-22 22:14:27 +00:00
|
|
|
t.Parallel()
|
2015-06-11 23:46:15 +00:00
|
|
|
defer func(t *testing.T) {
|
|
|
|
if recover() == nil {
|
|
|
|
t.Fatalf("should panic")
|
|
|
|
}
|
|
|
|
}(t)
|
|
|
|
|
|
|
|
// Create the server
|
|
|
|
dir, token, srv, client := testACLFilterServer(t)
|
|
|
|
defer os.RemoveAll(dir)
|
|
|
|
defer srv.Shutdown()
|
|
|
|
defer client.Close()
|
|
|
|
|
|
|
|
// Pass an unhandled type into the ACL filter.
|
|
|
|
srv.filterACL(token, &structs.HealthCheck{})
|
|
|
|
}
|
|
|
|
|
2016-12-09 00:01:01 +00:00
|
|
|
func TestACL_vetRegisterWithACL(t *testing.T) {
|
2017-05-22 22:14:27 +00:00
|
|
|
t.Parallel()
|
2016-12-09 00:01:01 +00:00
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Node: "nope",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
}
|
|
|
|
|
|
|
|
// With a nil ACL, the update should be allowed.
|
|
|
|
if err := vetRegisterWithACL(nil, args, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a basic node policy.
|
|
|
|
policy, err := acl.Parse(`
|
|
|
|
node "node" {
|
|
|
|
policy = "write"
|
2014-08-11 21:01:45 +00:00
|
|
|
}
|
2016-12-09 00:01:01 +00:00
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err := acl.New(acl.DenyAll(), policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// With that policy, the update should now be blocked for node reasons.
|
|
|
|
err = vetRegisterWithACL(perms, args, nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now use a permitted node name.
|
|
|
|
args.Node = "node"
|
|
|
|
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build some node info that matches what we have now.
|
|
|
|
ns := &structs.NodeServices{
|
|
|
|
Node: &structs.Node{
|
|
|
|
Node: "node",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
},
|
|
|
|
Services: make(map[string]*structs.NodeService),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try to register a service, which should be blocked.
|
|
|
|
args.Service = &structs.NodeService{
|
|
|
|
Service: "service",
|
|
|
|
ID: "my-id",
|
|
|
|
}
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Chain on a basic service policy.
|
|
|
|
policy, err = acl.Parse(`
|
|
|
|
service "service" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err = acl.New(perms, policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the service ACL, the update should go through.
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add an existing service that they are clobbering and aren't allowed
|
|
|
|
// to write to.
|
|
|
|
ns.Services["my-id"] = &structs.NodeService{
|
|
|
|
Service: "other",
|
|
|
|
ID: "my-id",
|
|
|
|
}
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Chain on a policy that allows them to write to the other service.
|
|
|
|
policy, err = acl.Parse(`
|
|
|
|
service "other" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err = acl.New(perms, policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now it should go through.
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try creating the node and the service at once by having no existing
|
|
|
|
// node record. This should be ok since we have node and service
|
|
|
|
// permissions.
|
|
|
|
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a node-level check to the member, which should be rejected.
|
|
|
|
args.Check = &structs.HealthCheck{
|
|
|
|
Node: "node",
|
|
|
|
}
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "check member must be nil") {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move the check into the slice, but give a bad node name.
|
|
|
|
args.Check.Node = "nope"
|
|
|
|
args.Checks = append(args.Checks, args.Check)
|
|
|
|
args.Check = nil
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "doesn't match register request node") {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fix the node name, which should now go through.
|
|
|
|
args.Checks[0].Node = "node"
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a service-level check.
|
|
|
|
args.Checks = append(args.Checks, &structs.HealthCheck{
|
|
|
|
Node: "node",
|
|
|
|
ServiceID: "my-id",
|
|
|
|
})
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try creating everything at once. This should be ok since we have all
|
|
|
|
// the permissions we need. It also makes sure that we can register a
|
|
|
|
// new node, service, and associated checks.
|
|
|
|
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nil out the service registration, which'll skip the special case
|
|
|
|
// and force us to look at the ns data (it will look like we are
|
|
|
|
// writing to the "other" service which also has "my-id").
|
|
|
|
args.Service = nil
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Chain on a policy that forbids them to write to the other service.
|
|
|
|
policy, err = acl.Parse(`
|
|
|
|
service "other" {
|
|
|
|
policy = "deny"
|
|
|
|
}
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err = acl.New(perms, policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This should get rejected.
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change the existing service data to point to a service name they
|
|
|
|
	// can write to. This should go through.
|
|
|
|
ns.Services["my-id"] = &structs.NodeService{
|
|
|
|
Service: "service",
|
|
|
|
ID: "my-id",
|
|
|
|
}
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Chain on a policy that forbids them to write to the node.
|
|
|
|
policy, err = acl.Parse(`
|
|
|
|
node "node" {
|
|
|
|
policy = "deny"
|
|
|
|
}
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err = acl.New(perms, policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This should get rejected because there's a node-level check in here.
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change the node-level check into a service check, and then it should
|
|
|
|
// go through.
|
|
|
|
args.Checks[0].ServiceID = "my-id"
|
|
|
|
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, attempt to update the node part of the data and make sure
|
|
|
|
// that gets rejected since they no longer have permissions.
|
|
|
|
args.Address = "127.0.0.2"
|
|
|
|
err = vetRegisterWithACL(perms, args, ns)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
2014-08-11 21:01:45 +00:00
|
|
|
}
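
// A minimal sketch (hypothetical function name, assuming the same
// package-level helpers used above) of the policy-layering pattern this
// test repeats: parse an HCL policy with acl.Parse, stack it on an
// existing ACL with acl.New, then vet a register request.
func exampleVetRegisterSketch() error {
	policy, err := acl.Parse(`
node "node" {
	policy = "write"
}
service "service" {
	policy = "write"
}
`)
	if err != nil {
		return err
	}
	perms, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		return err
	}

	// Register a node and a service in one request; both the node and
	// service write rules above are needed for this to pass vetting.
	args := &structs.RegisterRequest{
		Node:    "node",
		Address: "127.0.0.1",
		Service: &structs.NodeService{
			Service: "service",
			ID:      "my-id",
		},
	}
	return vetRegisterWithACL(perms, args, nil)
}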
|
2016-12-10 03:15:44 +00:00
|
|
|
|
|
|
|
func TestACL_vetDeregisterWithACL(t *testing.T) {
|
2017-05-22 22:14:27 +00:00
|
|
|
t.Parallel()
|
2016-12-10 03:15:44 +00:00
|
|
|
args := &structs.DeregisterRequest{
|
|
|
|
Node: "nope",
|
|
|
|
}
|
|
|
|
|
|
|
|
// With a nil ACL, the update should be allowed.
|
|
|
|
if err := vetDeregisterWithACL(nil, args, nil, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Create a basic node and service policy.
|
|
|
|
policy, err := acl.Parse(`
|
|
|
|
node "node" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
service "service" {
|
|
|
|
policy = "write"
|
|
|
|
}
|
|
|
|
`)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err %v", err)
|
|
|
|
}
|
|
|
|
perms, err := acl.New(acl.DenyAll(), policy)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// With that policy, the update should now be blocked for node reasons.
|
|
|
|
err = vetDeregisterWithACL(perms, args, nil, nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now use a permitted node name.
|
|
|
|
args.Node = "node"
|
|
|
|
if err := vetDeregisterWithACL(perms, args, nil, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try an unknown check.
|
|
|
|
args.CheckID = "check-id"
|
|
|
|
err = vetDeregisterWithACL(perms, args, nil, nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "Unknown check") {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now pass in a check that should be blocked.
|
|
|
|
nc := &structs.HealthCheck{
|
|
|
|
Node: "node",
|
|
|
|
CheckID: "check-id",
|
|
|
|
ServiceID: "service-id",
|
|
|
|
ServiceName: "nope",
|
|
|
|
}
|
|
|
|
err = vetDeregisterWithACL(perms, args, nil, nc)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change it to an allowed service, which should go through.
|
|
|
|
nc.ServiceName = "service"
|
|
|
|
if err := vetDeregisterWithACL(perms, args, nil, nc); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Switch to a node check that should be blocked.
|
|
|
|
args.Node = "nope"
|
|
|
|
nc.Node = "nope"
|
|
|
|
nc.ServiceID = ""
|
|
|
|
nc.ServiceName = ""
|
|
|
|
err = vetDeregisterWithACL(perms, args, nil, nc)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Switch to an allowed node check, which should go through.
|
|
|
|
args.Node = "node"
|
|
|
|
nc.Node = "node"
|
|
|
|
if err := vetDeregisterWithACL(perms, args, nil, nc); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try an unknown service.
|
|
|
|
args.ServiceID = "service-id"
|
|
|
|
err = vetDeregisterWithACL(perms, args, nil, nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "Unknown service") {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now pass in a service that should be blocked.
|
|
|
|
ns := &structs.NodeService{
|
|
|
|
ID: "service-id",
|
|
|
|
Service: "nope",
|
|
|
|
}
|
|
|
|
err = vetDeregisterWithACL(perms, args, ns, nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), permissionDenied) {
|
|
|
|
t.Fatalf("bad: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Change it to an allowed service, which should go through.
|
|
|
|
ns.Service = "service"
|
|
|
|
if err := vetDeregisterWithACL(perms, args, ns, nil); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
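
// A minimal sketch (hypothetical function name; the acl.ACL parameter
// type is an assumption based on how perms is built and used above) of
// vetting a service deregistration, mirroring the last steps of the test.
func exampleVetDeregisterSketch(perms acl.ACL) error {
	// Deregister a known service: the third argument carries the existing
	// service entry being removed and the fourth (check) argument is nil.
	args := &structs.DeregisterRequest{
		Node:      "node",
		ServiceID: "service-id",
	}
	ns := &structs.NodeService{
		ID:      "service-id",
		Service: "service",
	}
	// Passing vetting requires service "write" on "service", as shown above.
	return vetDeregisterWithACL(perms, args, ns, nil)
}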
|