From 838591c9168aef736e948c8e7235be3ccb715579 Mon Sep 17 00:00:00 2001 From: James Phillips Date: Sun, 16 Jul 2017 21:12:16 -0700 Subject: [PATCH] Changes remote exec KV read to call GetTokenForAgent(). (#3283) * Changes remote exec KV read to call GetTokenForAgent(), which can use the acl_agent_token instead of the acl_token. Fixes #3160. * Fixes remote exec unit test with ACLs. * Adds unhappy ACL path to unit tests for remote exec. --- agent/remote_exec.go | 4 +- agent/remote_exec_test.go | 123 ++++++++++++++++------ website/source/docs/agent/options.html.md | 8 +- website/source/docs/guides/acl.html.md | 42 ++++++-- 4 files changed, 131 insertions(+), 46 deletions(-) diff --git a/agent/remote_exec.go b/agent/remote_exec.go index 333f87455..9ea6eedbf 100644 --- a/agent/remote_exec.go +++ b/agent/remote_exec.go @@ -243,7 +243,7 @@ func (a *Agent) remoteExecGetSpec(event *remoteExecEvent, spec *remoteExecSpec) AllowStale: true, // Stale read for scale! Retry on failure. }, } - get.Token = a.config.ACLToken + get.Token = a.config.GetTokenForAgent() var out structs.IndexedDirEntries QUERY: if err := a.RPC("KVS.Get", &get, &out); err != nil { @@ -310,7 +310,7 @@ func (a *Agent) remoteExecWriteKey(event *remoteExecEvent, suffix string, val [] Session: event.Session, }, } - write.Token = a.config.ACLToken + write.Token = a.config.GetTokenForAgent() var success bool if err := a.RPC("KVS.Apply", &write, &success); err != nil { return err diff --git a/agent/remote_exec_test.go b/agent/remote_exec_test.go index e6ed178fe..3b161953a 100644 --- a/agent/remote_exec_test.go +++ b/agent/remote_exec_test.go @@ -95,27 +95,47 @@ func TestRexecWriter(t *testing.T) { func TestRemoteExecGetSpec(t *testing.T) { t.Parallel() - testRemoteExecGetSpec(t, nil) + testRemoteExecGetSpec(t, nil, "", true) } func TestRemoteExecGetSpec_ACLToken(t *testing.T) { t.Parallel() cfg := TestConfig() cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" cfg.ACLToken = "root" cfg.ACLDefaultPolicy = 
"deny" - testRemoteExecGetSpec(t, cfg) + testRemoteExecGetSpec(t, cfg, "root", true) } -func testRemoteExecGetSpec(t *testing.T, c *Config) { - a := NewTestAgent(t.Name(), nil) +func TestRemoteExecGetSpec_ACLAgentToken(t *testing.T) { + t.Parallel() + cfg := TestConfig() + cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" + cfg.ACLAgentToken = "root" + cfg.ACLDefaultPolicy = "deny" + testRemoteExecGetSpec(t, cfg, "root", true) +} + +func TestRemoteExecGetSpec_ACLDeny(t *testing.T) { + t.Parallel() + cfg := TestConfig() + cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" + cfg.ACLDefaultPolicy = "deny" + testRemoteExecGetSpec(t, cfg, "root", false) +} + +func testRemoteExecGetSpec(t *testing.T, c *Config, token string, shouldSucceed bool) { + a := NewTestAgent(t.Name(), c) defer a.Shutdown() event := &remoteExecEvent{ Prefix: "_rexec", - Session: makeRexecSession(t, a.Agent), + Session: makeRexecSession(t, a.Agent, token), } - defer destroySession(t, a.Agent, event.Session) + defer destroySession(t, a.Agent, event.Session, token) spec := &remoteExecSpec{ Command: "uptime", @@ -127,78 +147,103 @@ func testRemoteExecGetSpec(t *testing.T, c *Config) { t.Fatalf("err: %v", err) } key := "_rexec/" + event.Session + "/job" - setKV(t, a.Agent, key, buf) + setKV(t, a.Agent, key, buf, token) var out remoteExecSpec - if !a.remoteExecGetSpec(event, &out) { + if shouldSucceed != a.remoteExecGetSpec(event, &out) { t.Fatalf("bad") } - if !reflect.DeepEqual(spec, &out) { + if shouldSucceed && !reflect.DeepEqual(spec, &out) { t.Fatalf("bad spec") } } func TestRemoteExecWrites(t *testing.T) { t.Parallel() - testRemoteExecWrites(t, nil) + testRemoteExecWrites(t, nil, "", true) } func TestRemoteExecWrites_ACLToken(t *testing.T) { t.Parallel() cfg := TestConfig() cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" cfg.ACLToken = "root" cfg.ACLDefaultPolicy = "deny" - testRemoteExecWrites(t, cfg) + testRemoteExecWrites(t, cfg, "root", true) } -func testRemoteExecWrites(t 
*testing.T, c *Config) { - a := NewTestAgent(t.Name(), nil) +func TestRemoteExecWrites_ACLAgentToken(t *testing.T) { + t.Parallel() + cfg := TestConfig() + cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" + cfg.ACLAgentToken = "root" + cfg.ACLDefaultPolicy = "deny" + testRemoteExecWrites(t, cfg, "root", true) +} + +func TestRemoteExecWrites_ACLDeny(t *testing.T) { + t.Parallel() + cfg := TestConfig() + cfg.ACLDatacenter = "dc1" + cfg.ACLMasterToken = "root" + cfg.ACLDefaultPolicy = "deny" + testRemoteExecWrites(t, cfg, "root", false) +} + +func testRemoteExecWrites(t *testing.T, c *Config, token string, shouldSucceed bool) { + a := NewTestAgent(t.Name(), c) defer a.Shutdown() event := &remoteExecEvent{ Prefix: "_rexec", - Session: makeRexecSession(t, a.Agent), + Session: makeRexecSession(t, a.Agent, token), } - defer destroySession(t, a.Agent, event.Session) + defer destroySession(t, a.Agent, event.Session, token) - if !a.remoteExecWriteAck(event) { + if shouldSucceed != a.remoteExecWriteAck(event) { t.Fatalf("bad") } output := []byte("testing") - if !a.remoteExecWriteOutput(event, 0, output) { + if shouldSucceed != a.remoteExecWriteOutput(event, 0, output) { t.Fatalf("bad") } - if !a.remoteExecWriteOutput(event, 10, output) { + if shouldSucceed != a.remoteExecWriteOutput(event, 10, output) { t.Fatalf("bad") } + // Bypass the remaining checks if the write was expected to fail. 
+ if !shouldSucceed { + return + } + exitCode := 1 if !a.remoteExecWriteExitCode(event, &exitCode) { t.Fatalf("bad") } key := "_rexec/" + event.Session + "/" + a.Config.NodeName + "/ack" - d := getKV(t, a.Agent, key) + d := getKV(t, a.Agent, key, token) if d == nil || d.Session != event.Session { t.Fatalf("bad ack: %#v", d) } key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/out/00000" - d = getKV(t, a.Agent, key) + d = getKV(t, a.Agent, key, token) if d == nil || d.Session != event.Session || !bytes.Equal(d.Value, output) { t.Fatalf("bad output: %#v", d) } key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/out/0000a" - d = getKV(t, a.Agent, key) + d = getKV(t, a.Agent, key, token) if d == nil || d.Session != event.Session || !bytes.Equal(d.Value, output) { t.Fatalf("bad output: %#v", d) } key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/exit" - d = getKV(t, a.Agent, key) + d = getKV(t, a.Agent, key, token) if d == nil || d.Session != event.Session || string(d.Value) != "1" { t.Fatalf("bad output: %#v", d) } @@ -210,9 +255,9 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string event := &remoteExecEvent{ Prefix: "_rexec", - Session: makeRexecSession(t, a.Agent), + Session: makeRexecSession(t, a.Agent, ""), } - defer destroySession(t, a.Agent, event.Session) + defer destroySession(t, a.Agent, event.Session, "") spec := &remoteExecSpec{ Command: command, @@ -223,7 +268,7 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string t.Fatalf("err: %v", err) } key := "_rexec/" + event.Session + "/job" - setKV(t, a.Agent, key, buf) + setKV(t, a.Agent, key, buf, "") buf, err = json.Marshal(event) if err != nil { @@ -239,14 +284,14 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string // Verify we have an ack key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/ack" - d := getKV(t, a.Agent, key) + d := getKV(t, a.Agent, key, "") if d == nil || 
d.Session != event.Session { t.Fatalf("bad ack: %#v", d) } // Verify we have output key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/out/00000" - d = getKV(t, a.Agent, key) + d = getKV(t, a.Agent, key, "") if d == nil || d.Session != event.Session || !bytes.Contains(d.Value, []byte(expectedSubstring)) { t.Fatalf("bad output: %#v", d) @@ -254,7 +299,7 @@ func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string // Verify we have an exit code key = "_rexec/" + event.Session + "/" + a.Config.NodeName + "/exit" - d = getKV(t, a.Agent, key) + d = getKV(t, a.Agent, key, "") if d == nil || d.Session != event.Session || string(d.Value) != expectedReturnCode { t.Fatalf("bad output: %#v", d) } @@ -270,7 +315,7 @@ func TestHandleRemoteExecFailed(t *testing.T) { testHandleRemoteExec(t, "echo failing;exit 2", "failing", "2") } -func makeRexecSession(t *testing.T, a *Agent) string { +func makeRexecSession(t *testing.T, a *Agent, token string) string { args := structs.SessionRequest{ Datacenter: a.config.Datacenter, Op: structs.SessionCreate, @@ -278,6 +323,9 @@ func makeRexecSession(t *testing.T, a *Agent) string { Node: a.config.NodeName, LockDelay: 15 * time.Second, }, + WriteRequest: structs.WriteRequest{ + Token: token, + }, } var out string if err := a.RPC("Session.Apply", &args, &out); err != nil { @@ -286,13 +334,16 @@ func makeRexecSession(t *testing.T, a *Agent) string { return out } -func destroySession(t *testing.T, a *Agent, session string) { +func destroySession(t *testing.T, a *Agent, session string, token string) { args := structs.SessionRequest{ Datacenter: a.config.Datacenter, Op: structs.SessionDestroy, Session: structs.Session{ ID: session, }, + WriteRequest: structs.WriteRequest{ + Token: token, + }, } var out string if err := a.RPC("Session.Apply", &args, &out); err != nil { @@ -300,7 +351,7 @@ func destroySession(t *testing.T, a *Agent, session string) { } } -func setKV(t *testing.T, a *Agent, key string, val []byte) { 
+func setKV(t *testing.T, a *Agent, key string, val []byte, token string) { write := structs.KVSRequest{ Datacenter: a.config.Datacenter, Op: api.KVSet, @@ -308,20 +359,24 @@ func setKV(t *testing.T, a *Agent, key string, val []byte) { Key: key, Value: val, }, + WriteRequest: structs.WriteRequest{ + Token: token, + }, } - write.Token = a.config.ACLToken var success bool if err := a.RPC("KVS.Apply", &write, &success); err != nil { t.Fatalf("err: %v", err) } } -func getKV(t *testing.T, a *Agent, key string) *structs.DirEntry { +func getKV(t *testing.T, a *Agent, key string, token string) *structs.DirEntry { req := structs.KeyRequest{ Datacenter: a.config.Datacenter, Key: key, + QueryOptions: structs.QueryOptions{ + Token: token, + }, } - req.Token = a.config.ACLToken var out structs.IndexedDirEntries if err := a.RPC("KVS.Get", &req, &out); err != nil { t.Fatalf("err: %v", err) diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index be8a6fbf0..88e8df4b3 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -459,11 +459,13 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass `acl_enforce_version_8` is set to true. * `acl_agent_token` - Used for clients - and servers to perform internal operations to the service catalog. If this isn't specified, then - the `acl_token` will be used. This was added in Consul 0.7.2. + and servers to perform internal operations. If this isn't specified, then the + `acl_token` will be used. This was added in Consul 0.7.2.

This token must at least have write access to the node name it will register as in order to set any - of the node-level information in the catalog such as metadata, or the node's tagged addresses. + of the node-level information in the catalog such as metadata, or the node's tagged addresses. There + are other places this token is used, please see [ACL Agent Token](/docs/guides/acl.html#acl-agent-token) + for more details. * `acl_enforce_version_8` - Used for clients and servers to determine if enforcement should occur for new ACL policies being diff --git a/website/source/docs/guides/acl.html.md b/website/source/docs/guides/acl.html.md index a8a9296d1..a2794fcd3 100644 --- a/website/source/docs/guides/acl.html.md +++ b/website/source/docs/guides/acl.html.md @@ -128,15 +128,15 @@ system, or accessing Consul in special situations: | Special Token | Servers | Clients | Purpose | | ------------- | ------- | ------- | ------- | -| [`acl_agent_master_token`](/docs/agent/options.html#acl_agent_master_token) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api/agent.html) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations | -| [`acl_agent_token`](/docs/agent/options.html#acl_agent_token) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations with the [Catalog API](/api/catalog.html); this needs to have at least `node` policy access so the agent can self update its registration information, and also needs `service` read access for all services that will be registered with that node for [anti-entropy](/docs/internals/anti-entropy.html) syncing | -| [`acl_master_token`](/docs/agent/options.html#acl_master_token) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see details below | +| [`acl_agent_master_token`](/docs/agent/options.html#acl_agent_master_token) | `OPTIONAL` | `OPTIONAL` | 
Special token that can be used to access [Agent API](/api/agent.html) when the ACL datacenter isn't available, or servers are offline (for clients); used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | +| [`acl_agent_token`](/docs/agent/options.html#acl_agent_token) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | +| [`acl_master_token`](/docs/agent/options.html#acl_master_token) | `REQUIRED` | `N/A` | Special token used to bootstrap the ACL system, see the [Bootstrapping ACLs](#bootstrapping-acls) section for more details | | [`acl_token`](/docs/agent/options.html#acl_token) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | -Since it is designed to be used when the Consul servers are not available, the -`acl_agent_master_token` is managed locally on the agent and does not need to have a -policy defined on the Consul servers via the ACL API. Once set, it implicitly has the -following policy associated with it (the `node` policy was added in Consul 0.9.0): + +**`ACL Agent Master Token`** + +Since the [`acl_agent_master_token`](/docs/agent/options.html#acl_agent_master_token) is designed to be used when the Consul servers are not available, its policy is managed locally on the agent and does not need to have a token defined on the Consul servers via the ACL API. Once set, it implicitly has the following policy associated with it (the `node` policy was added in Consul 0.9.0): ```text agent "" { @@ -147,6 +147,32 @@ node "" { } ``` + +**`ACL Agent Token`** + +The [`acl_agent_token`](/docs/agent/options.html#acl_agent_token) is a special token that is used for an agent's internal operations. 
It isn't used directly for any user-initiated operations like the [`acl_token`](/docs/agent/options.html#acl_token), though if the `acl_agent_token` isn't configured the `acl_token` will be used. The ACL agent token is used for the following operations by the agent:
+
+1. Updating the agent's node entry using the [Catalog API](/api/catalog.html), including updating its node metadata, tagged addresses, and network coordinates
+2. Performing [anti-entropy](/docs/internals/anti-entropy.html) syncing, in particular reading the node metadata and services registered with the catalog
+3. Reading and writing the special `_rexec` section of the KV store when executing [`consul exec`](/docs/commands/exec.html) commands
+
+Here's an example policy sufficient to accomplish the above for a node called `mynode`:
+
+```text
+node "mynode" {
+  policy = "write"
+}
+service "" {
+  policy = "read"
+}
+key "_rexec" {
+  policy = "write"
+}
+```
+
+The `service` policy needs `read` access for any services that can be registered on the agent. If [remote exec is disabled](/docs/agent/options.html#disable_remote_exec), the default, then the `key` policy can be omitted.
+
+
 #### Bootstrapping ACLs
 
 Bootstrapping ACLs on a new cluster requires a few steps, outlined in the example in this
@@ -239,6 +265,8 @@ catalog:
 2017/07/08 23:42:59 [INFO] agent: Synced node info
 ```
 
+See the [ACL Agent Token](#acl-agent-token) section for more details.
+
 **Enable ACLs on the Consul Clients**
 
 Since ACL enforcement also occurs on the Consul clients, we need to also restart them