open-consul/acl/acl_test.go

package acl

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)
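// legacyPolicy mirrors each exact-match rule list into the corresponding
// *Prefixes list. Legacy (pre-1.4) ACL rules were applied as prefix
// matches, so duplicating the rules this way lets the same fixtures
// exercise both the exact and the prefix matching paths. For example, a
// legacy read rule on agent "root" is expected to match "root" as well
// as names such as "root-ro" that merely begin with "root".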
func legacyPolicy(policy *Policy) *Policy {
	return &Policy{
		PolicyRules: PolicyRules{
			Agents:                policy.Agents,
			AgentPrefixes:         policy.Agents,
			Nodes:                 policy.Nodes,
			NodePrefixes:          policy.Nodes,
			Keys:                  policy.Keys,
			KeyPrefixes:           policy.Keys,
			Services:              policy.Services,
			ServicePrefixes:       policy.Services,
			Sessions:              policy.Sessions,
			SessionPrefixes:       policy.Sessions,
			Events:                policy.Events,
			EventPrefixes:         policy.Events,
			PreparedQueries:       policy.PreparedQueries,
			PreparedQueryPrefixes: policy.PreparedQueries,
			Keyring:               policy.Keyring,
			Operator:              policy.Operator,
			Mesh:                  policy.Mesh,
			Peering:               policy.Peering,
		},
	}
}
//
// The following one-line functions all conform to the check function
// signature stored in the aclCheck type, which makes defining ACL tests
// in the table inside TestACL nicer.
//
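// Each helper asserts the decision of exactly one Authorizer method; for
// example, checkDenyKeyRead(t, authz, "foo", nil) requires that
// authz.KeyRead("foo", nil) returns Deny.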
func checkAllowACLRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ACLRead(entCtx))
}
func checkAllowACLWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ACLWrite(entCtx))
}
func checkAllowAgentRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.AgentRead(prefix, entCtx))
}
func checkAllowAgentWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.AgentWrite(prefix, entCtx))
}
func checkAllowEventRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.EventRead(prefix, entCtx))
}
func checkAllowEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.EventWrite(prefix, entCtx))
}
func checkAllowIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.IntentionDefaultAllow(entCtx))
}
func checkAllowIntentionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.IntentionRead(prefix, entCtx))
}
func checkAllowIntentionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.IntentionWrite(prefix, entCtx))
}
func checkAllowKeyRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyRead(prefix, entCtx))
}
func checkAllowKeyList(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyList(prefix, entCtx))
}
func checkAllowKeyringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyringRead(entCtx))
}
func checkAllowKeyringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyringWrite(entCtx))
}
func checkAllowKeyWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyWrite(prefix, entCtx))
}
func checkAllowKeyWritePrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.KeyWritePrefix(prefix, entCtx))
}
func checkAllowNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.NodeRead(prefix, entCtx))
}
func checkAllowNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.NodeReadAll(entCtx))
}
func checkAllowNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.NodeWrite(prefix, entCtx))
}
func checkAllowMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.MeshRead(entCtx))
}
func checkAllowMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.MeshWrite(entCtx))
}
func checkAllowPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.PeeringRead(entCtx))
}
func checkAllowPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.PeeringWrite(entCtx))
}
func checkAllowOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.OperatorRead(entCtx))
}
func checkAllowOperatorWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.OperatorWrite(entCtx))
}
func checkAllowPreparedQueryRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.PreparedQueryRead(prefix, entCtx))
}
func checkAllowPreparedQueryWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.PreparedQueryWrite(prefix, entCtx))
}
func checkAllowServiceRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ServiceRead(prefix, entCtx))
}
func checkAllowServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ServiceReadAll(entCtx))
}
func checkAllowServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ServiceWrite(prefix, entCtx))
}
func checkAllowServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.ServiceWriteAny(entCtx))
}
func checkAllowSessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.SessionRead(prefix, entCtx))
}
func checkAllowSessionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.SessionWrite(prefix, entCtx))
}
func checkAllowSnapshot(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Allow, authz.Snapshot(entCtx))
}
func checkDenyACLRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ACLRead(entCtx))
}
func checkDenyACLWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ACLWrite(entCtx))
}
func checkDenyAgentRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.AgentRead(prefix, entCtx))
}
func checkDenyAgentWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.AgentWrite(prefix, entCtx))
}
func checkDenyEventRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.EventRead(prefix, entCtx))
}
func checkDenyEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.EventWrite(prefix, entCtx))
}
func checkDenyIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.IntentionDefaultAllow(entCtx))
}
func checkDenyIntentionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.IntentionRead(prefix, entCtx))
}
func checkDenyIntentionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.IntentionWrite(prefix, entCtx))
}
func checkDenyKeyRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyRead(prefix, entCtx))
}
func checkDenyKeyList(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyList(prefix, entCtx))
}
func checkDenyKeyringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyringRead(entCtx))
}
func checkDenyKeyringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyringWrite(entCtx))
}
func checkDenyKeyWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyWrite(prefix, entCtx))
}
func checkDenyKeyWritePrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.KeyWritePrefix(prefix, entCtx))
}
func checkDenyNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.NodeRead(prefix, entCtx))
}
func checkDenyNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.NodeReadAll(entCtx))
}
func checkDenyNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.NodeWrite(prefix, entCtx))
}
func checkDenyMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.MeshRead(entCtx))
}
func checkDenyMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.MeshWrite(entCtx))
}
func checkDenyPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.PeeringRead(entCtx))
}
func checkDenyPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.PeeringWrite(entCtx))
}
func checkDenyOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.OperatorRead(entCtx))
}
func checkDenyOperatorWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.OperatorWrite(entCtx))
}
func checkDenyPreparedQueryRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.PreparedQueryRead(prefix, entCtx))
}
func checkDenyPreparedQueryWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.PreparedQueryWrite(prefix, entCtx))
}
func checkDenyServiceRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ServiceRead(prefix, entCtx))
}
func checkDenyServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ServiceReadAll(entCtx))
}
func checkDenyServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ServiceWrite(prefix, entCtx))
}
func checkDenyServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.ServiceWriteAny(entCtx))
}
func checkDenySessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.SessionRead(prefix, entCtx))
}
func checkDenySessionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.SessionWrite(prefix, entCtx))
}
func checkDenySnapshot(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Deny, authz.Snapshot(entCtx))
}
func checkDefaultACLRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ACLRead(entCtx))
}
func checkDefaultACLWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ACLWrite(entCtx))
}
func checkDefaultAgentRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.AgentRead(prefix, entCtx))
}
func checkDefaultAgentWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.AgentWrite(prefix, entCtx))
}
func checkDefaultEventRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.EventRead(prefix, entCtx))
}
func checkDefaultEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.EventWrite(prefix, entCtx))
}
func checkDefaultIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.IntentionDefaultAllow(entCtx))
}
func checkDefaultIntentionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.IntentionRead(prefix, entCtx))
}
func checkDefaultIntentionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.IntentionWrite(prefix, entCtx))
}
func checkDefaultKeyRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyRead(prefix, entCtx))
}
func checkDefaultKeyList(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyList(prefix, entCtx))
}
func checkDefaultKeyringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyringRead(entCtx))
}
func checkDefaultKeyringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyringWrite(entCtx))
}
func checkDefaultKeyWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyWrite(prefix, entCtx))
}
func checkDefaultKeyWritePrefix(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.KeyWritePrefix(prefix, entCtx))
}
func checkDefaultNodeRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.NodeRead(prefix, entCtx))
}
func checkDefaultNodeReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.NodeReadAll(entCtx))
}
func checkDefaultNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.NodeWrite(prefix, entCtx))
}
func checkDefaultMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.MeshRead(entCtx))
}
func checkDefaultMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.MeshWrite(entCtx))
}
func checkDefaultPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.PeeringRead(entCtx))
}
func checkDefaultPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.PeeringWrite(entCtx))
}
func checkDefaultOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.OperatorRead(entCtx))
}
func checkDefaultOperatorWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.OperatorWrite(entCtx))
}
func checkDefaultPreparedQueryRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.PreparedQueryRead(prefix, entCtx))
}
func checkDefaultPreparedQueryWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.PreparedQueryWrite(prefix, entCtx))
}
func checkDefaultServiceRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ServiceRead(prefix, entCtx))
}
func checkDefaultServiceReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ServiceReadAll(entCtx))
}
func checkDefaultServiceWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ServiceWrite(prefix, entCtx))
}
func checkDefaultServiceWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.ServiceWriteAny(entCtx))
}
func checkDefaultSessionRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.SessionRead(prefix, entCtx))
}
func checkDefaultSessionWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.SessionWrite(prefix, entCtx))
}
func checkDefaultSnapshot(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
	require.Equal(t, Default, authz.Snapshot(entCtx))
}
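// TestACL is a table-driven test: each aclTest names a default policy
// (the decision used when no rule matches), an optional stack of parsed
// policies layered on top of it, and the expected Allow/Deny/Default
// decision for each Authorizer method via the check helpers above.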
func TestACL(t *testing.T) {
	type aclCheck struct {
		name   string
		prefix string
		check  func(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext)
	}
	type aclTest struct {
		name          string
		defaultPolicy Authorizer
		policyStack   []*Policy
		checks        []aclCheck
	}
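	// The runner that consumes this table appears later in the file; a
	// minimal sketch of it, assuming the package's
	// NewPolicyAuthorizerWithDefaults helper, looks like:
	//
	//	for _, tcase := range tests {
	//		t.Run(tcase.name, func(t *testing.T) {
	//			authz, err := NewPolicyAuthorizerWithDefaults(tcase.defaultPolicy, tcase.policyStack, nil)
	//			require.NoError(t, err)
	//			for _, check := range tcase.checks {
	//				t.Run(check.name, func(t *testing.T) {
	//					check.check(t, authz, check.prefix, nil)
	//				})
	//			}
	//		})
	//	}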
	tests := []aclTest{
		{
			name:          "DenyAll",
			defaultPolicy: DenyAll(),
			checks: []aclCheck{
				{name: "DenyACLRead", check: checkDenyACLRead},
				{name: "DenyACLWrite", check: checkDenyACLWrite},
				{name: "DenyAgentRead", check: checkDenyAgentRead},
				{name: "DenyAgentWrite", check: checkDenyAgentWrite},
				{name: "DenyEventRead", check: checkDenyEventRead},
				{name: "DenyEventWrite", check: checkDenyEventWrite},
				{name: "DenyIntentionDefaultAllow", check: checkDenyIntentionDefaultAllow},
				{name: "DenyIntentionRead", check: checkDenyIntentionRead},
				{name: "DenyIntentionWrite", check: checkDenyIntentionWrite},
				{name: "DenyKeyRead", check: checkDenyKeyRead},
				{name: "DenyKeyringRead", check: checkDenyKeyringRead},
				{name: "DenyKeyringWrite", check: checkDenyKeyringWrite},
				{name: "DenyKeyWrite", check: checkDenyKeyWrite},
				{name: "DenyNodeRead", check: checkDenyNodeRead},
				{name: "DenyNodeReadAll", check: checkDenyNodeReadAll},
				{name: "DenyNodeWrite", check: checkDenyNodeWrite},
				{name: "DenyMeshRead", check: checkDenyMeshRead},
				{name: "DenyMeshWrite", check: checkDenyMeshWrite},
				{name: "DenyPeeringRead", check: checkDenyPeeringRead},
				{name: "DenyPeeringWrite", check: checkDenyPeeringWrite},
				{name: "DenyOperatorRead", check: checkDenyOperatorRead},
				{name: "DenyOperatorWrite", check: checkDenyOperatorWrite},
				{name: "DenyPreparedQueryRead", check: checkDenyPreparedQueryRead},
				{name: "DenyPreparedQueryWrite", check: checkDenyPreparedQueryWrite},
				{name: "DenyServiceRead", check: checkDenyServiceRead},
				{name: "DenyServiceReadAll", check: checkDenyServiceReadAll},
				{name: "DenyServiceWrite", check: checkDenyServiceWrite},
				{name: "DenySessionRead", check: checkDenySessionRead},
				{name: "DenySessionWrite", check: checkDenySessionWrite},
				{name: "DenySnapshot", check: checkDenySnapshot},
			},
		},
		{
			name:          "AllowAll",
			defaultPolicy: AllowAll(),
			checks: []aclCheck{
				{name: "DenyACLRead", check: checkDenyACLRead},
				{name: "DenyACLWrite", check: checkDenyACLWrite},
				{name: "AllowAgentRead", check: checkAllowAgentRead},
				{name: "AllowAgentWrite", check: checkAllowAgentWrite},
				{name: "AllowEventRead", check: checkAllowEventRead},
				{name: "AllowEventWrite", check: checkAllowEventWrite},
				{name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow},
				{name: "AllowIntentionRead", check: checkAllowIntentionRead},
				{name: "AllowIntentionWrite", check: checkAllowIntentionWrite},
				{name: "AllowKeyRead", check: checkAllowKeyRead},
				{name: "AllowKeyringRead", check: checkAllowKeyringRead},
				{name: "AllowKeyringWrite", check: checkAllowKeyringWrite},
				{name: "AllowKeyWrite", check: checkAllowKeyWrite},
				{name: "AllowNodeRead", check: checkAllowNodeRead},
				{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
				{name: "AllowNodeWrite", check: checkAllowNodeWrite},
				{name: "AllowMeshRead", check: checkAllowMeshRead},
				{name: "AllowMeshWrite", check: checkAllowMeshWrite},
				{name: "AllowPeeringRead", check: checkAllowPeeringRead},
				{name: "AllowPeeringWrite", check: checkAllowPeeringWrite},
				{name: "AllowOperatorRead", check: checkAllowOperatorRead},
				{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
				{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
				{name: "AllowPreparedQueryWrite", check: checkAllowPreparedQueryWrite},
				{name: "AllowServiceRead", check: checkAllowServiceRead},
				{name: "AllowServiceReadAll", check: checkAllowServiceReadAll},
				{name: "AllowServiceWrite", check: checkAllowServiceWrite},
				{name: "AllowSessionRead", check: checkAllowSessionRead},
				{name: "AllowSessionWrite", check: checkAllowSessionWrite},
				{name: "DenySnapshot", check: checkDenySnapshot},
			},
		},
		{
			name:          "ManageAll",
			defaultPolicy: ManageAll(),
			checks: []aclCheck{
				{name: "AllowACLRead", check: checkAllowACLRead},
				{name: "AllowACLWrite", check: checkAllowACLWrite},
				{name: "AllowAgentRead", check: checkAllowAgentRead},
				{name: "AllowAgentWrite", check: checkAllowAgentWrite},
				{name: "AllowEventRead", check: checkAllowEventRead},
				{name: "AllowEventWrite", check: checkAllowEventWrite},
				{name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow},
				{name: "AllowIntentionRead", check: checkAllowIntentionRead},
				{name: "AllowIntentionWrite", check: checkAllowIntentionWrite},
				{name: "AllowKeyRead", check: checkAllowKeyRead},
				{name: "AllowKeyringRead", check: checkAllowKeyringRead},
				{name: "AllowKeyringWrite", check: checkAllowKeyringWrite},
				{name: "AllowKeyWrite", check: checkAllowKeyWrite},
				{name: "AllowNodeRead", check: checkAllowNodeRead},
				{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
				{name: "AllowNodeWrite", check: checkAllowNodeWrite},
				{name: "AllowMeshRead", check: checkAllowMeshRead},
				{name: "AllowMeshWrite", check: checkAllowMeshWrite},
				{name: "AllowPeeringRead", check: checkAllowPeeringRead},
				{name: "AllowPeeringWrite", check: checkAllowPeeringWrite},
				{name: "AllowOperatorRead", check: checkAllowOperatorRead},
				{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
				{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
				{name: "AllowPreparedQueryWrite", check: checkAllowPreparedQueryWrite},
				{name: "AllowServiceRead", check: checkAllowServiceRead},
				{name: "AllowServiceReadAll", check: checkAllowServiceReadAll},
				{name: "AllowServiceWrite", check: checkAllowServiceWrite},
				{name: "AllowSessionRead", check: checkAllowSessionRead},
				{name: "AllowSessionWrite", check: checkAllowSessionWrite},
				{name: "AllowSnapshot", check: checkAllowSnapshot},
			},
		},
		{
			name:          "AgentBasicDefaultDeny",
			defaultPolicy: DenyAll(),
			policyStack: []*Policy{
				legacyPolicy(&Policy{
					PolicyRules: PolicyRules{
						Agents: []*AgentRule{
							{
								Node:   "root",
								Policy: PolicyRead,
							},
							{
								Node:   "root-nope",
								Policy: PolicyDeny,
							},
							{
								Node:   "root-rw",
								Policy: PolicyWrite,
							},
						},
					},
				}),
			},
			checks: []aclCheck{
				{name: "DefaultReadDenied", prefix: "ro", check: checkDenyAgentRead},
				{name: "DefaultWriteDenied", prefix: "ro", check: checkDenyAgentWrite},
				{name: "ROReadAllowed", prefix: "root", check: checkAllowAgentRead},
				{name: "ROWriteDenied", prefix: "root", check: checkDenyAgentWrite},
				{name: "ROSuffixReadAllowed", prefix: "root-ro", check: checkAllowAgentRead},
				{name: "ROSuffixWriteDenied", prefix: "root-ro", check: checkDenyAgentWrite},
				{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowAgentRead},
				{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowAgentWrite},
				{name: "RWSuffixReadAllowed", prefix: "root-rw-sub", check: checkAllowAgentRead},
				{name: "RWSuffixWriteAllowed", prefix: "root-rw-sub", check: checkAllowAgentWrite},
				{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyAgentRead},
				{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyAgentWrite},
				{name: "DenySuffixReadDenied", prefix: "root-nope-sub", check: checkDenyAgentRead},
				{name: "DenySuffixWriteDenied", prefix: "root-nope-sub", check: checkDenyAgentWrite},
			},
		},
		{
			name:          "AgentBasicDefaultAllow",
			defaultPolicy: AllowAll(),
			policyStack: []*Policy{
				legacyPolicy(&Policy{
					PolicyRules: PolicyRules{
						Agents: []*AgentRule{
							{
								Node:   "root",
								Policy: PolicyRead,
							},
							{
								Node:   "root-nope",
								Policy: PolicyDeny,
							},
							{
								Node:   "root-rw",
								Policy: PolicyWrite,
							},
						},
					},
				}),
			},
			checks: []aclCheck{
				{name: "DefaultReadAllowed", prefix: "ro", check: checkAllowAgentRead},
				{name: "DefaultWriteAllowed", prefix: "ro", check: checkAllowAgentWrite},
				{name: "ROReadAllowed", prefix: "root", check: checkAllowAgentRead},
				{name: "ROWriteDenied", prefix: "root", check: checkDenyAgentWrite},
				{name: "ROSuffixReadAllowed", prefix: "root-ro", check: checkAllowAgentRead},
				{name: "ROSuffixWriteDenied", prefix: "root-ro", check: checkDenyAgentWrite},
				{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowAgentRead},
				{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowAgentWrite},
				{name: "RWSuffixReadAllowed", prefix: "root-rw-sub", check: checkAllowAgentRead},
				{name: "RWSuffixWriteAllowed", prefix: "root-rw-sub", check: checkAllowAgentWrite},
				{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyAgentRead},
				{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyAgentWrite},
				{name: "DenySuffixReadDenied", prefix: "root-nope-sub", check: checkDenyAgentRead},
				{name: "DenySuffixWriteDenied", prefix: "root-nope-sub", check: checkDenyAgentWrite},
			},
		},
{
name: "PreparedQueryDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
New ACLs (#4791) This PR is almost a complete rewrite of the ACL system within Consul. It brings the features more in line with other HashiCorp products. Obviously there is quite a bit left to do here but most of it is related docs, testing and finishing the last few commands in the CLI. I will update the PR description and check off the todos as I finish them over the next few days/week. Description At a high level this PR is mainly to split ACL tokens from Policies and to split the concepts of Authorization from Identities. A lot of this PR is mostly just to support CRUD operations on ACLTokens and ACLPolicies. These in and of themselves are not particularly interesting. The bigger conceptual changes are in how tokens get resolved, how backwards compatibility is handled and the separation of policy from identity which could lead the way to allowing for alternative identity providers. On the surface and with a new cluster the ACL system will look very similar to that of Nomads. Both have tokens and policies. Both have local tokens. The ACL management APIs for both are very similar. I even ripped off Nomad's ACL bootstrap resetting procedure. There are a few key differences though. Nomad requires token and policy replication where Consul only requires policy replication with token replication being opt-in. In Consul local tokens only work with token replication being enabled though. All policies in Nomad are globally applicable. In Consul all policies are stored and replicated globally but can be scoped to a subset of the datacenters. This allows for more granular access management. Unlike Nomad, Consul has legacy baggage in the form of the original ACL system. The ramifications of this are: A server running the new system must still support other clients using the legacy system. A client running the new system must be able to use the legacy RPCs when the servers in its datacenter are running the legacy system. The primary ACL DC's servers running in legacy mode needs to be a gate that keeps everything else in the entire multi-DC cluster running in legacy mode. So not only does this PR implement the new ACL system but has a legacy mode built in for when the cluster isn't ready for new ACLs. Also detecting that new ACLs can be used is automatic and requires no configuration on the part of administrators. This process is detailed more in the "Transitioning from Legacy to New ACL Mode" section below.
2018-10-19 16:04:07 +00:00
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
PreparedQueries: []*PreparedQueryRule{
{
Prefix: "other",
Policy: PolicyDeny,
},
},
},
}),
},
checks: []aclCheck{
// in version 1.2.1 and below this would have failed
{name: "ReadAllowed", prefix: "foo", check: checkAllowPreparedQueryRead},
// in version 1.2.1 and below this would have failed
{name: "WriteAllowed", prefix: "foo", check: checkAllowPreparedQueryWrite},
{name: "ReadDenied", prefix: "other", check: checkDenyPreparedQueryRead},
{name: "WriteDenied", prefix: "other", check: checkDenyPreparedQueryWrite},
},
},
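// The next two cases stack a "root" policy and a "child" policy. When both
// define a rule for the same exact resource (the "override" agent: deny in
// the root policy, write in the child), the later policy in the stack wins,
// as the ChildOverride* checks assert. Legacy rules also match by prefix,
// which is what the *-prefix checks exercise.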
{
name: "AgentNestedDefaultDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Agents: []*AgentRule{
{
Node: "root-nope",
Policy: PolicyDeny,
},
{
Node: "root-ro",
Policy: PolicyRead,
},
{
Node: "root-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Agents: []*AgentRule{
{
Node: "child-nope",
Policy: PolicyDeny,
},
{
Node: "child-ro",
Policy: PolicyRead,
},
{
Node: "child-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "DefaultReadDenied", prefix: "nope", check: checkDenyAgentRead},
{name: "DefaultWriteDenied", prefix: "nope", check: checkDenyAgentWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyAgentRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyAgentWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowAgentRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenyAgentWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowAgentRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowAgentWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenyAgentRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenyAgentWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowAgentRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenyAgentWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowAgentRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowAgentWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenyAgentRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenyAgentWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowAgentRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenyAgentWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowAgentRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowAgentWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenyAgentRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenyAgentWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowAgentRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenyAgentWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowAgentRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowAgentWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowAgentRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowAgentWrite},
},
},
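// Same two-policy stack as above, but under a permissive default: prefixes
// with no matching rule ("nope") are now allowed, while every explicitly
// configured rule resolves exactly as in the default-deny case.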
{
name: "AgentNestedDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Agents: []*AgentRule{
{
Node: "root-nope",
Policy: PolicyDeny,
},
{
Node: "root-ro",
Policy: PolicyRead,
},
{
Node: "root-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Agents: []*AgentRule{
{
Node: "child-nope",
Policy: PolicyDeny,
},
{
Node: "child-ro",
Policy: PolicyRead,
},
{
Node: "child-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "DefaultReadAllowed", prefix: "nope", check: checkAllowAgentRead},
{name: "DefaultWriteAllowed", prefix: "nope", check: checkAllowAgentWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyAgentRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyAgentWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowAgentRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenyAgentWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowAgentRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowAgentWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenyAgentRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenyAgentWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowAgentRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenyAgentWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowAgentRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowAgentWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenyAgentRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenyAgentWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowAgentRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenyAgentWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowAgentRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowAgentWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenyAgentRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenyAgentWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowAgentRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenyAgentWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowAgentRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowAgentWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowAgentRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowAgentWrite},
},
},
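// Keyring is a single scalar rule rather than a prefixed rule set. The
// eight cases below cover the full matrix of deny/read/write/unset rules
// under both default-allow and default-deny; the default policy only
// decides the outcome when no keyring rule is set at all.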
{
name: "KeyringDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyKeyringRead},
// in version 1.2.1 and below this would have failed
{name: "WriteDenied", check: checkDenyKeyringWrite},
},
},
{
name: "KeyringDefaultAllowPolicyRead",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowKeyringRead},
// in version 1.2.1 and below this would have failed
{name: "WriteDenied", check: checkDenyKeyringWrite},
},
},
{
name: "KeyringDefaultAllowPolicyWrite",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowKeyringRead},
{name: "WriteAllowed", check: checkAllowKeyringWrite},
},
},
{
name: "KeyringDefaultAllowPolicyNone",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowKeyringRead},
{name: "WriteAllowed", check: checkAllowKeyringWrite},
},
},
{
name: "KeyringDefaultDenyPolicyDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyKeyringRead},
{name: "WriteDenied", check: checkDenyKeyringWrite},
},
},
{
name: "KeyringDefaultDenyPolicyRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowKeyringRead},
{name: "WriteDenied", check: checkDenyKeyringWrite},
},
},
{
name: "KeyringDefaultDenyPolicyWrite",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keyring: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowKeyringRead},
{name: "WriteAllowed", check: checkAllowKeyringWrite},
},
},
{
name: "KeyringDefaultDenyPolicyNone",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyKeyringRead},
{name: "WriteDenied", check: checkDenyKeyringWrite},
},
},
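// The mesh resource repeats the keyring matrix: an explicit mesh rule
// always beats the default policy, and only a fully unset rule falls
// through to the default. The operator fallback is exercised separately
// below.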
{
name: "MeshDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
name: "MeshDefaultAllowPolicyRead",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
name: "MeshDefaultAllowPolicyWrite",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
name: "MeshDefaultAllowPolicyNone",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
name: "MeshDefaultDenyPolicyDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
name: "MeshDefaultDenyPolicyRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
name: "MeshDefaultDenyPolicyWrite",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
name: "MeshDefaultDenyPolicyNone",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
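// The "MeshOperator*" cases pin down how mesh permissions interact with the
// operator rule: an explicit mesh rule always wins, and only when mesh is
// unset does the operator rule apply. defaultPolicy is nil ("test both"),
// so the harness runs each case under both default-allow and default-deny;
// every case sets an explicit operator rule, so the outcome must not depend
// on the default. A minimal sketch of that resolution order (hypothetical
// helper, not the real implementation):
//
//	func effectiveMeshPolicy(rules PolicyRules, defaultPolicy string) string {
//		if rules.Mesh != "" {
//			return rules.Mesh // explicit mesh rule wins
//		}
//		if rules.Operator != "" {
//			return rules.Operator // otherwise fall back to operator
//		}
//		return defaultPolicy // no rule at all: the default decides
//	}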
{
// o:deny, m:deny = deny
name: "MeshOperatorDenyPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:deny = deny
name: "MeshOperatorReadPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:deny = deny
name: "MeshOperatorWritePolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:deny, m:read = read
name: "MeshOperatorDenyPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:read = read
name: "MeshOperatorReadPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:read = read
name: "MeshOperatorWritePolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:deny, m:write = write
name: "MeshOperatorDenyPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:read, m:write = write
name: "MeshOperatorReadPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:write, m:write = write
name: "MeshOperatorWritePolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:deny, m:<none> = deny
name: "MeshOperatorDenyPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:<none> = read
name: "MeshOperatorReadPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:<none> = write
name: "MeshOperatorWritePolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
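// Peering mirrors the mesh matrix exactly: an explicit peering rule first,
// then (in the PeeringOperator* cases below) the operator fallback, then
// the default policy.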
{
name: "PeeringDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyRead",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyWrite",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyNone",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyWrite",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyNone",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
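// As with mesh above, an unset peering rule falls back to the operator
// rule; an explicit peering rule always overrides it.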
{
// o:deny, p:deny = deny
name: "PeeringOperatorDenyPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:deny = deny
name: "PeeringOperatorReadPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:deny = deny
name: "PeeringOperatorWritePolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:deny, p:read = read
name: "PeeringOperatorDenyPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:read = read
name: "PeeringOperatorReadPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:read = read
name: "PeeringOperatorWritePolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:deny, p:write = write
name: "PeeringOperatorDenyPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:read, p:write = write
name: "PeeringOperatorReadPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:write, p:write = write
name: "PeeringOperatorWritePolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:deny, p:<none> = deny
name: "PeeringOperatorDenyPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:<none> = read
name: "PeeringOperatorReadPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:<none> = write
name: "PeeringOperatorWritePolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
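// The operator rule also stands on its own. Its matrix below matches the
// keyring pattern: an explicit rule beats the default policy, and an unset
// rule inherits it.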
{
name: "OperatorDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyOperatorRead},
// in version 1.2.1 and below this would have failed
{name: "WriteDenied", check: checkDenyOperatorWrite},
},
},
{
name: "OperatorDefaultAllowPolicyRead",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowOperatorRead},
// in version 1.2.1 and below this would have failed
{name: "WriteDenied", check: checkDenyOperatorWrite},
},
},
{
name: "OperatorDefaultAllowPolicyWrite",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowOperatorRead},
{name: "WriteAllowed", check: checkAllowOperatorWrite},
},
},
{
name: "OperatorDefaultAllowPolicyNone",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowOperatorRead},
{name: "WriteAllowed", check: checkAllowOperatorWrite},
},
},
{
name: "OperatorDefaultDenyPolicyDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyOperatorRead},
{name: "WriteDenied", check: checkDenyOperatorWrite},
},
},
{
name: "OperatorDefaultDenyPolicyRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowOperatorRead},
{name: "WriteDenied", check: checkDenyOperatorWrite},
},
},
{
name: "OperatorDefaultDenyPolicyWrite",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowOperatorRead},
{name: "WriteAllowed", check: checkAllowOperatorWrite},
},
},
{
name: "OperatorDefaultDenyPolicyNone",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyOperatorRead},
{name: "WriteDenied", check: checkDenyOperatorWrite},
},
},
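// Node rules behave like the agent rules above, with one addition: the
// empty-prefix ReadAllDenied check asserts that reading every node at once
// is denied, since a default-deny stack of named rules cannot grant
// blanket read access.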
{
name: "NodeDefaultDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Nodes: []*NodeRule{
{
Name: "root-nope",
Policy: PolicyDeny,
},
{
Name: "root-ro",
Policy: PolicyRead,
},
{
Name: "root-rw",
Policy: PolicyWrite,
},
{
Name: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Nodes: []*NodeRule{
{
Name: "child-nope",
Policy: PolicyDeny,
},
{
Name: "child-ro",
Policy: PolicyRead,
},
{
Name: "child-rw",
Policy: PolicyWrite,
},
{
Name: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "ReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "DefaultReadDenied", prefix: "nope", check: checkDenyNodeRead},
{name: "DefaultWriteDenied", prefix: "nope", check: checkDenyNodeWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyNodeRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyNodeWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowNodeRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenyNodeWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowNodeRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowNodeWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenyNodeRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenyNodeWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowNodeRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenyNodeWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowNodeRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowNodeWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenyNodeRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenyNodeWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowNodeRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenyNodeWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowNodeRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowNodeWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenyNodeRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenyNodeWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowNodeRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenyNodeWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowNodeRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowNodeWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowNodeRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowNodeWrite},
},
},
{
name: "NodeDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Nodes: []*NodeRule{
{
Name: "root-nope",
Policy: PolicyDeny,
},
{
Name: "root-ro",
Policy: PolicyRead,
},
{
Name: "root-rw",
Policy: PolicyWrite,
},
{
Name: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Nodes: []*NodeRule{
{
Name: "child-nope",
Policy: PolicyDeny,
},
{
Name: "child-ro",
Policy: PolicyRead,
},
{
Name: "child-rw",
Policy: PolicyWrite,
},
{
Name: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "ReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "DefaultReadAllowed", prefix: "nope", check: checkAllowNodeRead},
{name: "DefaultWriteAllowed", prefix: "nope", check: checkAllowNodeWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenyNodeRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenyNodeWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowNodeRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenyNodeWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowNodeRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowNodeWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenyNodeRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenyNodeWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowNodeRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenyNodeWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowNodeRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowNodeWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenyNodeRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenyNodeWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowNodeRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenyNodeWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowNodeRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowNodeWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenyNodeRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenyNodeWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowNodeRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenyNodeWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowNodeRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowNodeWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowNodeRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowNodeWrite},
},
},
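		// SessionDefaultDeny applies the same two-policy stack to session
		// rules with a default-deny authorizer, so unmatched prefixes are denied.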
{
name: "SessionDefaultDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Sessions: []*SessionRule{
{
Node: "root-nope",
Policy: PolicyDeny,
},
{
Node: "root-ro",
Policy: PolicyRead,
},
{
Node: "root-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Sessions: []*SessionRule{
{
Node: "child-nope",
Policy: PolicyDeny,
},
{
Node: "child-ro",
Policy: PolicyRead,
},
{
Node: "child-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "DefaultReadDenied", prefix: "nope", check: checkDenySessionRead},
{name: "DefaultWriteDenied", prefix: "nope", check: checkDenySessionWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenySessionRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenySessionWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowSessionRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenySessionWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowSessionRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowSessionWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenySessionRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenySessionWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowSessionRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenySessionWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowSessionRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowSessionWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenySessionRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenySessionWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowSessionRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenySessionWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowSessionRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowSessionWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenySessionRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenySessionWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowSessionRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenySessionWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowSessionRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowSessionWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowSessionRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowSessionWrite},
},
},
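		// SessionDefaultAllow repeats the session rule stack over a
		// default-allow authorizer, so unmatched prefixes are allowed.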
{
name: "SessionDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Sessions: []*SessionRule{
{
Node: "root-nope",
Policy: PolicyDeny,
},
{
Node: "root-ro",
Policy: PolicyRead,
},
{
Node: "root-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyDeny,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Sessions: []*SessionRule{
{
Node: "child-nope",
Policy: PolicyDeny,
},
{
Node: "child-ro",
Policy: PolicyRead,
},
{
Node: "child-rw",
Policy: PolicyWrite,
},
{
Node: "override",
Policy: PolicyWrite,
},
},
},
}),
},
checks: []aclCheck{
{name: "DefaultReadAllowed", prefix: "nope", check: checkAllowSessionRead},
{name: "DefaultWriteAllowed", prefix: "nope", check: checkAllowSessionWrite},
{name: "DenyReadDenied", prefix: "root-nope", check: checkDenySessionRead},
{name: "DenyWriteDenied", prefix: "root-nope", check: checkDenySessionWrite},
{name: "ROReadAllowed", prefix: "root-ro", check: checkAllowSessionRead},
{name: "ROWriteDenied", prefix: "root-ro", check: checkDenySessionWrite},
{name: "RWReadAllowed", prefix: "root-rw", check: checkAllowSessionRead},
{name: "RWWriteAllowed", prefix: "root-rw", check: checkAllowSessionWrite},
{name: "DenySuffixReadDenied", prefix: "root-nope-prefix", check: checkDenySessionRead},
{name: "DenySuffixWriteDenied", prefix: "root-nope-prefix", check: checkDenySessionWrite},
{name: "ROSuffixReadAllowed", prefix: "root-ro-prefix", check: checkAllowSessionRead},
{name: "ROSuffixWriteDenied", prefix: "root-ro-prefix", check: checkDenySessionWrite},
{name: "RWSuffixReadAllowed", prefix: "root-rw-prefix", check: checkAllowSessionRead},
{name: "RWSuffixWriteAllowed", prefix: "root-rw-prefix", check: checkAllowSessionWrite},
{name: "ChildDenyReadDenied", prefix: "child-nope", check: checkDenySessionRead},
{name: "ChildDenyWriteDenied", prefix: "child-nope", check: checkDenySessionWrite},
{name: "ChildROReadAllowed", prefix: "child-ro", check: checkAllowSessionRead},
{name: "ChildROWriteDenied", prefix: "child-ro", check: checkDenySessionWrite},
{name: "ChildRWReadAllowed", prefix: "child-rw", check: checkAllowSessionRead},
{name: "ChildRWWriteAllowed", prefix: "child-rw", check: checkAllowSessionWrite},
{name: "ChildDenySuffixReadDenied", prefix: "child-nope-prefix", check: checkDenySessionRead},
{name: "ChildDenySuffixWriteDenied", prefix: "child-nope-prefix", check: checkDenySessionWrite},
{name: "ChildROSuffixReadAllowed", prefix: "child-ro-prefix", check: checkAllowSessionRead},
{name: "ChildROSuffixWriteDenied", prefix: "child-ro-prefix", check: checkDenySessionWrite},
{name: "ChildRWSuffixReadAllowed", prefix: "child-rw-prefix", check: checkAllowSessionRead},
{name: "ChildRWSuffixWriteAllowed", prefix: "child-rw-prefix", check: checkAllowSessionWrite},
{name: "ChildOverrideReadAllowed", prefix: "override", check: checkAllowSessionRead},
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowSessionWrite},
},
},
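		// Parent layers a child policy over a parent policy: the child narrows
		// key, service, and prepared-query access granted by the parent, and
		// anything neither policy matches falls through to the default deny.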
{
name: "Parent",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Keys: []*KeyRule{
{
Prefix: "foo/",
Policy: PolicyWrite,
},
{
Prefix: "bar/",
Policy: PolicyRead,
},
},
PreparedQueries: []*PreparedQueryRule{
{
Prefix: "other",
Policy: PolicyWrite,
},
{
Prefix: "foo",
Policy: PolicyRead,
},
},
Services: []*ServiceRule{
{
Name: "other",
Policy: PolicyWrite,
},
{
Name: "foo",
Policy: PolicyRead,
},
},
},
}),
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Keys: []*KeyRule{
{
Prefix: "foo/priv/",
Policy: PolicyRead,
},
{
Prefix: "bar/",
Policy: PolicyDeny,
},
{
Prefix: "zip/",
Policy: PolicyRead,
},
},
PreparedQueries: []*PreparedQueryRule{
{
Prefix: "bar",
Policy: PolicyDeny,
},
},
Services: []*ServiceRule{
{
Name: "bar",
Policy: PolicyDeny,
},
},
},
}),
},
checks: []aclCheck{
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "KeyReadDenied", prefix: "other", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "other", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "other", check: checkDenyKeyWritePrefix},
{name: "KeyReadAllowed", prefix: "foo/test", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "foo/test", check: checkAllowKeyWrite},
{name: "KeyWritePrefixAllowed", prefix: "foo/test", check: checkAllowKeyWritePrefix},
{name: "KeyReadAllowed", prefix: "foo/priv/test", check: checkAllowKeyRead},
{name: "KeyWriteDenied", prefix: "foo/priv/test", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "foo/priv/test", check: checkDenyKeyWritePrefix},
{name: "KeyReadDenied", prefix: "bar/any", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "bar/any", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "bar/any", check: checkDenyKeyWritePrefix},
{name: "KeyReadAllowed", prefix: "zip/test", check: checkAllowKeyRead},
{name: "KeyWriteDenied", prefix: "zip/test", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "zip/test", check: checkDenyKeyWritePrefix},
{name: "ServiceReadDenied", prefix: "fail", check: checkDenyServiceRead},
{name: "ServiceWriteDenied", prefix: "fail", check: checkDenyServiceWrite},
{name: "ServiceReadAllowed", prefix: "other", check: checkAllowServiceRead},
{name: "ServiceWriteAllowed", prefix: "other", check: checkAllowServiceWrite},
{name: "ServiceReadAllowed", prefix: "foo", check: checkAllowServiceRead},
{name: "ServiceWriteDenied", prefix: "foo", check: checkDenyServiceWrite},
{name: "ServiceReadDenied", prefix: "bar", check: checkDenyServiceRead},
{name: "ServiceWriteDenied", prefix: "bar", check: checkDenyServiceWrite},
{name: "PreparedQueryReadAllowed", prefix: "foo", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "foo", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "foobar", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "foobar", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "bar", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "bar", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "barbaz", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "barbaz", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "baz", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "baz", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "nope", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "nope", check: checkDenyPreparedQueryWrite},
{name: "ACLReadDenied", check: checkDenyACLRead},
{name: "ACLWriteDenied", check: checkDenyACLWrite},
{name: "SnapshotDenied", check: checkDenySnapshot},
{name: "IntentionDefaultAllowDenied", check: checkDenyIntentionDefaultAllow},
},
},
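		// ComplexDefaultAllow exercises event, key (including list), intention,
		// service, and prepared-query rules from a single legacy policy over a
		// default-allow authorizer.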
{
name: "ComplexDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
legacyPolicy(&Policy{
PolicyRules: PolicyRules{
Events: []*EventRule{
{
Event: "",
Policy: PolicyRead,
},
{
Event: "foo",
Policy: PolicyWrite,
},
{
Event: "bar",
Policy: PolicyDeny,
},
},
Keys: []*KeyRule{
{
Prefix: "foo/",
Policy: PolicyWrite,
},
{
Prefix: "foo/priv/",
Policy: PolicyDeny,
},
{
Prefix: "bar/",
Policy: PolicyDeny,
},
{
Prefix: "zip/",
Policy: PolicyRead,
},
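						// PolicyList on "zap/" permits listing (and, per the
						// checks below, reading) keys under the prefix without
						// granting writes.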
{
Prefix: "zap/",
Policy: PolicyList,
},
},
PreparedQueries: []*PreparedQueryRule{
{
Prefix: "",
Policy: PolicyRead,
},
{
Prefix: "foo",
Policy: PolicyWrite,
},
{
Prefix: "bar",
Policy: PolicyDeny,
},
{
Prefix: "zoo",
Policy: PolicyWrite,
},
},
Services: []*ServiceRule{
{
Name: "",
Policy: PolicyWrite,
},
{
Name: "foo",
Policy: PolicyRead,
},
{
Name: "bar",
Policy: PolicyDeny,
},
{
Name: "barfoo",
Policy: PolicyWrite,
Intentions: PolicyWrite,
},
{
Name: "intbaz",
Policy: PolicyWrite,
Intentions: PolicyDeny,
},
},
},
}),
},
checks: []aclCheck{
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "KeyReadAllowed", prefix: "other", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "other", check: checkAllowKeyWrite},
{name: "KeyWritePrefixAllowed", prefix: "other", check: checkAllowKeyWritePrefix},
{name: "KeyListAllowed", prefix: "other", check: checkAllowKeyList},
{name: "KeyReadAllowed", prefix: "foo/test", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "foo/test", check: checkAllowKeyWrite},
{name: "KeyWritePrefixAllowed", prefix: "foo/test", check: checkAllowKeyWritePrefix},
{name: "KeyListAllowed", prefix: "foo/test", check: checkAllowKeyList},
{name: "KeyReadDenied", prefix: "foo/priv/test", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "foo/priv/test", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "foo/priv/test", check: checkDenyKeyWritePrefix},
{name: "KeyListDenied", prefix: "foo/priv/test", check: checkDenyKeyList},
{name: "KeyReadDenied", prefix: "bar/any", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "bar/any", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "bar/any", check: checkDenyKeyWritePrefix},
{name: "KeyListDenied", prefix: "bar/any", check: checkDenyKeyList},
{name: "KeyReadAllowed", prefix: "zip/test", check: checkAllowKeyRead},
{name: "KeyWriteDenied", prefix: "zip/test", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "zip/test", check: checkDenyKeyWritePrefix},
{name: "KeyListDenied", prefix: "zip/test", check: checkDenyKeyList},
{name: "KeyReadAllowed", prefix: "foo/", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "foo/", check: checkAllowKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "foo/", check: checkDenyKeyWritePrefix},
{name: "KeyListAllowed", prefix: "foo/", check: checkAllowKeyList},
{name: "KeyReadAllowed", prefix: "", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "", check: checkAllowKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "", check: checkDenyKeyWritePrefix},
{name: "KeyListAllowed", prefix: "", check: checkAllowKeyList},
{name: "KeyReadAllowed", prefix: "zap/test", check: checkAllowKeyRead},
{name: "KeyWriteDenied", prefix: "zap/test", check: checkDenyKeyWrite},
{name: "KeyWritePrefixDenied", prefix: "zap/test", check: checkDenyKeyWritePrefix},
{name: "KeyListAllowed", prefix: "zap/test", check: checkAllowKeyList},
{name: "IntentionReadAllowed", prefix: "other", check: checkAllowIntentionRead},
{name: "IntentionWriteDenied", prefix: "other", check: checkDenyIntentionWrite},
{name: "IntentionReadAllowed", prefix: "foo", check: checkAllowIntentionRead},
{name: "IntentionWriteDenied", prefix: "foo", check: checkDenyIntentionWrite},
{name: "IntentionReadDenied", prefix: "bar", check: checkDenyIntentionRead},
{name: "IntentionWriteDenied", prefix: "bar", check: checkDenyIntentionWrite},
{name: "IntentionReadAllowed", prefix: "foobar", check: checkAllowIntentionRead},
{name: "IntentionWriteDenied", prefix: "foobar", check: checkDenyIntentionWrite},
{name: "IntentionReadDenied", prefix: "barfo", check: checkDenyIntentionRead},
{name: "IntentionWriteDenied", prefix: "barfo", check: checkDenyIntentionWrite},
{name: "IntentionReadAllowed", prefix: "barfoo", check: checkAllowIntentionRead},
{name: "IntentionWriteAllowed", prefix: "barfoo", check: checkAllowIntentionWrite},
{name: "IntentionReadAllowed", prefix: "barfoo2", check: checkAllowIntentionRead},
{name: "IntentionWriteAllowed", prefix: "barfoo2", check: checkAllowIntentionWrite},
{name: "IntentionReadDenied", prefix: "intbaz", check: checkDenyIntentionRead},
{name: "IntentionWriteDenied", prefix: "intbaz", check: checkDenyIntentionWrite},
{name: "IntentionDefaultAllowAllowed", check: checkAllowIntentionDefaultAllow},
{name: "ServiceReadAllowed", prefix: "other", check: checkAllowServiceRead},
{name: "ServiceWriteAllowed", prefix: "other", check: checkAllowServiceWrite},
{name: "ServiceReadAllowed", prefix: "foo", check: checkAllowServiceRead},
{name: "ServiceWriteDenied", prefix: "foo", check: checkDenyServiceWrite},
{name: "ServiceReadDenied", prefix: "bar", check: checkDenyServiceRead},
{name: "ServiceWriteDenied", prefix: "bar", check: checkDenyServiceWrite},
{name: "ServiceReadAllowed", prefix: "foobar", check: checkAllowServiceRead},
{name: "ServiceWriteDenied", prefix: "foobar", check: checkDenyServiceWrite},
{name: "ServiceReadDenied", prefix: "barfo", check: checkDenyServiceRead},
{name: "ServiceWriteDenied", prefix: "barfo", check: checkDenyServiceWrite},
{name: "ServiceReadAllowed", prefix: "barfoo", check: checkAllowServiceRead},
{name: "ServiceWriteAllowed", prefix: "barfoo", check: checkAllowServiceWrite},
{name: "ServiceReadAllowed", prefix: "barfoo2", check: checkAllowServiceRead},
{name: "ServiceWriteAllowed", prefix: "barfoo2", check: checkAllowServiceWrite},
{name: "EventReadAllowed", prefix: "foo", check: checkAllowEventRead},
{name: "EventWriteAllowed", prefix: "foo", check: checkAllowEventWrite},
{name: "EventReadAllowed", prefix: "foobar", check: checkAllowEventRead},
{name: "EventWriteAllowed", prefix: "foobar", check: checkAllowEventWrite},
{name: "EventReadDenied", prefix: "bar", check: checkDenyEventRead},
{name: "EventWriteDenied", prefix: "bar", check: checkDenyEventWrite},
{name: "EventReadDenied", prefix: "barbaz", check: checkDenyEventRead},
{name: "EventWriteDenied", prefix: "barbaz", check: checkDenyEventWrite},
{name: "EventReadAllowed", prefix: "baz", check: checkAllowEventRead},
{name: "EventWriteDenied", prefix: "baz", check: checkDenyEventWrite},
{name: "PreparedQueryReadAllowed", prefix: "foo", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteAllowed", prefix: "foo", check: checkAllowPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "foobar", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteAllowed", prefix: "foobar", check: checkAllowPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "bar", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "bar", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "barbaz", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "barbaz", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "baz", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "baz", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "nope", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "nope", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "zoo", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteAllowed", prefix: "zoo", check: checkAllowPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "zookeeper", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteAllowed", prefix: "zookeeper", check: checkAllowPreparedQueryWrite},
},
},
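		// ExactMatchPrecedence verifies that exact-match rules win over prefix
		// rules for every resource type: the exact "foo" (write) and "football"
		// (deny) rules take precedence over the "fo" and "foot" read prefixes.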
{
name: "ExactMatchPrecedence",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Agents: []*AgentRule{
{
Node: "foo",
Policy: PolicyWrite,
},
{
Node: "football",
Policy: PolicyDeny,
},
},
AgentPrefixes: []*AgentRule{
{
Node: "foot",
Policy: PolicyRead,
},
{
Node: "fo",
Policy: PolicyRead,
},
},
Keys: []*KeyRule{
{
Prefix: "foo",
Policy: PolicyWrite,
},
{
Prefix: "football",
Policy: PolicyDeny,
},
},
KeyPrefixes: []*KeyRule{
{
Prefix: "foot",
Policy: PolicyRead,
},
{
Prefix: "fo",
Policy: PolicyRead,
},
},
Nodes: []*NodeRule{
{
Name: "foo",
Policy: PolicyWrite,
},
{
Name: "football",
Policy: PolicyDeny,
},
},
NodePrefixes: []*NodeRule{
{
Name: "foot",
Policy: PolicyRead,
},
{
Name: "fo",
Policy: PolicyRead,
},
},
Services: []*ServiceRule{
{
Name: "foo",
Policy: PolicyWrite,
Intentions: PolicyWrite,
},
{
Name: "football",
Policy: PolicyDeny,
},
},
ServicePrefixes: []*ServiceRule{
{
Name: "foot",
Policy: PolicyRead,
Intentions: PolicyRead,
},
{
Name: "fo",
Policy: PolicyRead,
Intentions: PolicyRead,
},
},
Sessions: []*SessionRule{
{
Node: "foo",
Policy: PolicyWrite,
},
{
Node: "football",
Policy: PolicyDeny,
},
},
SessionPrefixes: []*SessionRule{
{
Node: "foot",
Policy: PolicyRead,
},
{
Node: "fo",
Policy: PolicyRead,
},
},
Events: []*EventRule{
{
Event: "foo",
Policy: PolicyWrite,
},
{
Event: "football",
Policy: PolicyDeny,
},
},
EventPrefixes: []*EventRule{
{
Event: "foot",
Policy: PolicyRead,
},
{
Event: "fo",
Policy: PolicyRead,
},
},
PreparedQueries: []*PreparedQueryRule{
{
Prefix: "foo",
Policy: PolicyWrite,
},
{
Prefix: "football",
Policy: PolicyDeny,
},
},
PreparedQueryPrefixes: []*PreparedQueryRule{
{
Prefix: "foot",
Policy: PolicyRead,
},
{
Prefix: "fo",
Policy: PolicyRead,
},
},
},
},
},
checks: []aclCheck{
{name: "NodeReadAllDenied", prefix: "", check: checkDenyNodeReadAll},
{name: "ServiceReadAllDenied", prefix: "", check: checkDenyServiceReadAll},
{name: "AgentReadPrefixAllowed", prefix: "fo", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "fo", check: checkDenyAgentWrite},
{name: "AgentReadPrefixAllowed", prefix: "for", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "for", check: checkDenyAgentWrite},
{name: "AgentReadAllowed", prefix: "foo", check: checkAllowAgentRead},
{name: "AgentWriteAllowed", prefix: "foo", check: checkAllowAgentWrite},
{name: "AgentReadPrefixAllowed", prefix: "foot", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "foot", check: checkDenyAgentWrite},
{name: "AgentReadPrefixAllowed", prefix: "foot2", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "foot2", check: checkDenyAgentWrite},
{name: "AgentReadPrefixAllowed", prefix: "food", check: checkAllowAgentRead},
{name: "AgentWritePrefixDenied", prefix: "food", check: checkDenyAgentWrite},
{name: "AgentReadDenied", prefix: "football", check: checkDenyAgentRead},
{name: "AgentWriteDenied", prefix: "football", check: checkDenyAgentWrite},
{name: "KeyReadPrefixAllowed", prefix: "fo", check: checkAllowKeyRead},
{name: "KeyWritePrefixDenied", prefix: "fo", check: checkDenyKeyWrite},
{name: "KeyReadPrefixAllowed", prefix: "for", check: checkAllowKeyRead},
{name: "KeyWritePrefixDenied", prefix: "for", check: checkDenyKeyWrite},
{name: "KeyReadAllowed", prefix: "foo", check: checkAllowKeyRead},
{name: "KeyWriteAllowed", prefix: "foo", check: checkAllowKeyWrite},
{name: "KeyReadPrefixAllowed", prefix: "foot", check: checkAllowKeyRead},
{name: "KeyWritePrefixDenied", prefix: "foot", check: checkDenyKeyWrite},
{name: "KeyReadPrefixAllowed", prefix: "foot2", check: checkAllowKeyRead},
{name: "KeyWritePrefixDenied", prefix: "foot2", check: checkDenyKeyWrite},
{name: "KeyReadPrefixAllowed", prefix: "food", check: checkAllowKeyRead},
{name: "KeyWritePrefixDenied", prefix: "food", check: checkDenyKeyWrite},
{name: "KeyReadDenied", prefix: "football", check: checkDenyKeyRead},
{name: "KeyWriteDenied", prefix: "football", check: checkDenyKeyWrite},
{name: "NodeReadPrefixAllowed", prefix: "fo", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "fo", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "for", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "for", check: checkDenyNodeWrite},
{name: "NodeReadAllowed", prefix: "foo", check: checkAllowNodeRead},
{name: "NodeWriteAllowed", prefix: "foo", check: checkAllowNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "foot", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "foot", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "foot2", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "foot2", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "food", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "food", check: checkDenyNodeWrite},
{name: "NodeReadDenied", prefix: "football", check: checkDenyNodeRead},
{name: "NodeWriteDenied", prefix: "football", check: checkDenyNodeWrite},
{name: "ServiceReadPrefixAllowed", prefix: "fo", check: checkAllowServiceRead},
{name: "ServiceWritePrefixDenied", prefix: "fo", check: checkDenyServiceWrite},
{name: "ServiceReadPrefixAllowed", prefix: "for", check: checkAllowServiceRead},
{name: "ServiceWritePrefixDenied", prefix: "for", check: checkDenyServiceWrite},
{name: "ServiceReadAllowed", prefix: "foo", check: checkAllowServiceRead},
{name: "ServiceWriteAllowed", prefix: "foo", check: checkAllowServiceWrite},
{name: "ServiceReadPrefixAllowed", prefix: "foot", check: checkAllowServiceRead},
{name: "ServiceWritePrefixDenied", prefix: "foot", check: checkDenyServiceWrite},
{name: "ServiceReadPrefixAllowed", prefix: "foot2", check: checkAllowServiceRead},
{name: "ServiceWritePrefixDenied", prefix: "foot2", check: checkDenyServiceWrite},
{name: "ServiceReadPrefixAllowed", prefix: "food", check: checkAllowServiceRead},
{name: "ServiceWritePrefixDenied", prefix: "food", check: checkDenyServiceWrite},
{name: "ServiceReadDenied", prefix: "football", check: checkDenyServiceRead},
{name: "ServiceWriteDenied", prefix: "football", check: checkDenyServiceWrite},
{name: "NodeReadPrefixAllowed", prefix: "fo", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "fo", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "for", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "for", check: checkDenyNodeWrite},
{name: "NodeReadAllowed", prefix: "foo", check: checkAllowNodeRead},
{name: "NodeWriteAllowed", prefix: "foo", check: checkAllowNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "foot", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "foot", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "foot2", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "foot2", check: checkDenyNodeWrite},
{name: "NodeReadPrefixAllowed", prefix: "food", check: checkAllowNodeRead},
{name: "NodeWritePrefixDenied", prefix: "food", check: checkDenyNodeWrite},
{name: "NodeReadDenied", prefix: "football", check: checkDenyNodeRead},
{name: "NodeWriteDenied", prefix: "football", check: checkDenyNodeWrite},
{name: "IntentionReadPrefixAllowed", prefix: "fo", check: checkAllowIntentionRead},
{name: "IntentionWritePrefixDenied", prefix: "fo", check: checkDenyIntentionWrite},
{name: "IntentionReadPrefixAllowed", prefix: "for", check: checkAllowIntentionRead},
{name: "IntentionWritePrefixDenied", prefix: "for", check: checkDenyIntentionWrite},
{name: "IntentionReadAllowed", prefix: "foo", check: checkAllowIntentionRead},
{name: "IntentionWriteAllowed", prefix: "foo", check: checkAllowIntentionWrite},
{name: "IntentionReadPrefixAllowed", prefix: "foot", check: checkAllowIntentionRead},
{name: "IntentionWritePrefixDenied", prefix: "foot", check: checkDenyIntentionWrite},
{name: "IntentionReadPrefixAllowed", prefix: "foot2", check: checkAllowIntentionRead},
{name: "IntentionWritePrefixDenied", prefix: "foot2", check: checkDenyIntentionWrite},
{name: "IntentionReadPrefixAllowed", prefix: "food", check: checkAllowIntentionRead},
{name: "IntentionWritePrefixDenied", prefix: "food", check: checkDenyIntentionWrite},
{name: "IntentionReadDenied", prefix: "football", check: checkDenyIntentionRead},
{name: "IntentionWriteDenied", prefix: "football", check: checkDenyIntentionWrite},
{name: "SessionReadPrefixAllowed", prefix: "fo", check: checkAllowSessionRead},
{name: "SessionWritePrefixDenied", prefix: "fo", check: checkDenySessionWrite},
{name: "SessionReadPrefixAllowed", prefix: "for", check: checkAllowSessionRead},
{name: "SessionWritePrefixDenied", prefix: "for", check: checkDenySessionWrite},
{name: "SessionReadAllowed", prefix: "foo", check: checkAllowSessionRead},
{name: "SessionWriteAllowed", prefix: "foo", check: checkAllowSessionWrite},
{name: "SessionReadPrefixAllowed", prefix: "foot", check: checkAllowSessionRead},
{name: "SessionWritePrefixDenied", prefix: "foot", check: checkDenySessionWrite},
{name: "SessionReadPrefixAllowed", prefix: "foot2", check: checkAllowSessionRead},
{name: "SessionWritePrefixDenied", prefix: "foot2", check: checkDenySessionWrite},
{name: "SessionReadPrefixAllowed", prefix: "food", check: checkAllowSessionRead},
{name: "SessionWritePrefixDenied", prefix: "food", check: checkDenySessionWrite},
{name: "SessionReadDenied", prefix: "football", check: checkDenySessionRead},
{name: "SessionWriteDenied", prefix: "football", check: checkDenySessionWrite},
{name: "EventReadPrefixAllowed", prefix: "fo", check: checkAllowEventRead},
{name: "EventWritePrefixDenied", prefix: "fo", check: checkDenyEventWrite},
{name: "EventReadPrefixAllowed", prefix: "for", check: checkAllowEventRead},
{name: "EventWritePrefixDenied", prefix: "for", check: checkDenyEventWrite},
{name: "EventReadAllowed", prefix: "foo", check: checkAllowEventRead},
{name: "EventWriteAllowed", prefix: "foo", check: checkAllowEventWrite},
{name: "EventReadPrefixAllowed", prefix: "foot", check: checkAllowEventRead},
{name: "EventWritePrefixDenied", prefix: "foot", check: checkDenyEventWrite},
{name: "EventReadPrefixAllowed", prefix: "foot2", check: checkAllowEventRead},
{name: "EventWritePrefixDenied", prefix: "foot2", check: checkDenyEventWrite},
{name: "EventReadPrefixAllowed", prefix: "food", check: checkAllowEventRead},
{name: "EventWritePrefixDenied", prefix: "food", check: checkDenyEventWrite},
{name: "EventReadDenied", prefix: "football", check: checkDenyEventRead},
{name: "EventWriteDenied", prefix: "football", check: checkDenyEventWrite},
{name: "PreparedQueryReadPrefixAllowed", prefix: "fo", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWritePrefixDenied", prefix: "fo", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadPrefixAllowed", prefix: "for", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWritePrefixDenied", prefix: "for", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadAllowed", prefix: "foo", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWriteAllowed", prefix: "foo", check: checkAllowPreparedQueryWrite},
{name: "PreparedQueryReadPrefixAllowed", prefix: "foot", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWritePrefixDenied", prefix: "foot", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadPrefixAllowed", prefix: "foot2", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWritePrefixDenied", prefix: "foot2", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadPrefixAllowed", prefix: "food", check: checkAllowPreparedQueryRead},
{name: "PreparedQueryWritePrefixDenied", prefix: "food", check: checkDenyPreparedQueryWrite},
{name: "PreparedQueryReadDenied", prefix: "football", check: checkDenyPreparedQueryRead},
{name: "PreparedQueryWriteDenied", prefix: "football", check: checkDenyPreparedQueryWrite},
},
},
{
name: "ACLRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
ACL: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowACLRead},
			// in Consul 1.2.1 and earlier this would have failed
{name: "WriteDenied", check: checkDenyACLWrite},
},
},
{
name: "ACLRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
ACL: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowACLRead},
			// in Consul 1.2.1 and earlier this would have failed
{name: "WriteAllowed", check: checkAllowACLWrite},
},
},
{
name: "KeyWritePrefixDefaultDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
KeyPrefixes: []*KeyRule{
{
Prefix: "fo",
Policy: PolicyRead,
},
{
Prefix: "foo/",
Policy: PolicyWrite,
},
{
Prefix: "bar/",
Policy: PolicyWrite,
},
{
Prefix: "baz/",
Policy: PolicyWrite,
},
{
Prefix: "test/",
Policy: PolicyWrite,
},
},
Keys: []*KeyRule{
{
Prefix: "foo/bar",
Policy: PolicyWrite,
},
{
Prefix: "bar/baz",
Policy: PolicyRead,
},
},
},
},
},
checks: []aclCheck{
// Ensure we deny access if the best match key_prefix rule does not grant
// write access (disregards both the default policy and any other rules with
// segments that may fall within the given kv prefix)
{name: "DeniedTopLevelPrefix", prefix: "foo", check: checkDenyKeyWritePrefix},
			// Allow recursive KV writes when a prefix rule grants write access and no
			// other rule covers a segment that falls within the requested kv prefix.
{name: "AllowedTopLevelPrefix", prefix: "baz/", check: checkAllowKeyWritePrefix},
			// Ensure we allow recursive KV writes when a prefix rule would allow it and
			// every other rule whose segment falls within the kv prefix being written
			// also grants write access.
{name: "AllowedPrefixWithNestedWrite", prefix: "foo/", check: checkAllowKeyWritePrefix},
// Ensure that we deny recursive KV writes when they would be allowed for a prefix but
// denied by either an exact match rule or prefix match rule for a segment prefixed by
// the kv prefix being checked against.
{name: "DenyPrefixWithNestedRead", prefix: "bar/", check: checkDenyKeyWritePrefix},
// Ensure that the default deny policy is used when there is no key_prefix rule
// for the given kv segment regardless of any rules that would grant write access
// to segments prefixed by the kv prefix being checked against.
{name: "DenyNoPrefixMatch", prefix: "te", check: checkDenyKeyWritePrefix},
},
},
{
name: "KeyWritePrefixDefaultAllow",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Keys: []*KeyRule{
{
Prefix: "foo/bar",
Policy: PolicyRead,
},
},
},
},
},
checks: []aclCheck{
// Ensure that we deny a key prefix write when a rule for a key within our prefix
// doesn't allow writing and the default policy is to allow
{name: "KeyWritePrefixDenied", prefix: "foo", check: checkDenyKeyWritePrefix},
// Ensure that the default allow policy is used when there is no prefix rule
// and there are no other rules regarding keys within that prefix to be written.
{name: "KeyWritePrefixAllowed", prefix: "bar", check: checkAllowKeyWritePrefix},
},
},
}
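	// run layers each policy in the stack over the previous authorizer via
	// NewPolicyAuthorizerWithDefaults, so the newest policy's rules are
	// consulted first and unmatched requests fall through to earlier layers
	// and finally to the default policy.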
run := func(t *testing.T, tcase aclTest, defaultPolicy Authorizer) {
acl := defaultPolicy
for _, policy := range tcase.policyStack {
newACL, err := NewPolicyAuthorizerWithDefaults(acl, []*Policy{policy}, nil)
require.NoError(t, err)
acl = newACL
}
for _, check := range tcase.checks {
checkName := check.name
if check.prefix != "" {
checkName = fmt.Sprintf("%s.Prefix(%s)", checkName, check.prefix)
}
t.Run(checkName, func(t *testing.T) {
check.check(t, acl, check.prefix, nil)
})
}
}
for _, tcase := range tests {
t.Run(tcase.name, func(t *testing.T) {
if tcase.defaultPolicy == nil {
t.Run("default-allow", func(t *testing.T) {
run(t, tcase, AllowAll())
})
t.Run("default-deny", func(t *testing.T) {
run(t, tcase, DenyAll())
})
} else {
run(t, tcase, tcase.defaultPolicy)
}
})
}
}
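
// A standalone sketch of the recursive KV write semantics exercised by the
// KeyWritePrefix cases above, assuming only helpers already defined in this
// file; the test name is illustrative and not part of the original suite.
func TestKeyWritePrefixSketch(t *testing.T) {
	policy := &Policy{
		PolicyRules: PolicyRules{
			KeyPrefixes: []*KeyRule{
				{Prefix: "docs/", Policy: PolicyWrite},
			},
			Keys: []*KeyRule{
				// An exact-match read-only rule nested under the writable
				// prefix blocks recursive writes over that prefix.
				{Prefix: "docs/readme", Policy: PolicyRead},
			},
		},
	}

	authz, err := NewPolicyAuthorizerWithDefaults(DenyAll(), []*Policy{policy}, nil)
	require.NoError(t, err)

	// Writing a single key under the prefix is still allowed...
	checkAllowKeyWrite(t, authz, "docs/guide", nil)
	// ...but a recursive write over "docs/" is denied because the nested
	// read-only rule falls within the requested prefix.
	checkDenyKeyWritePrefix(t, authz, "docs/", nil)
}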
func TestRootAuthorizer(t *testing.T) {
require.Equal(t, AllowAll(), RootAuthorizer("allow"))
require.Equal(t, DenyAll(), RootAuthorizer("deny"))
require.Equal(t, ManageAll(), RootAuthorizer("manage"))
require.Nil(t, RootAuthorizer("foo"))
}
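
// TestACLEnforce exercises the access-level lattice behind enforce: a rule
// granting write satisfies read, list, and write; list satisfies read and
// list; read satisfies only read; deny refuses everything; and an unknown
// level defers to the default policy.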
func TestACLEnforce(t *testing.T) {
type enforceTest struct {
name string
rule AccessLevel
required AccessLevel
expected EnforcementDecision
}
tests := []enforceTest{
{
name: "RuleNoneRequireRead",
rule: AccessUnknown,
required: AccessRead,
expected: Default,
},
{
name: "RuleNoneRequireWrite",
rule: AccessUnknown,
required: AccessWrite,
expected: Default,
},
{
name: "RuleNoneRequireList",
rule: AccessUnknown,
required: AccessList,
expected: Default,
},
{
name: "RuleReadRequireRead",
rule: AccessRead,
required: AccessRead,
expected: Allow,
},
{
name: "RuleReadRequireWrite",
rule: AccessRead,
required: AccessWrite,
expected: Deny,
},
{
name: "RuleReadRequireList",
rule: AccessRead,
required: AccessList,
expected: Deny,
},
{
name: "RuleListRequireRead",
rule: AccessList,
required: AccessRead,
expected: Allow,
},
{
name: "RuleListRequireWrite",
rule: AccessList,
required: AccessWrite,
expected: Deny,
},
{
name: "RuleListRequireList",
rule: AccessList,
required: AccessList,
expected: Allow,
},
{
name: "RuleWritetRequireRead",
rule: AccessWrite,
required: AccessRead,
expected: Allow,
},
{
name: "RuleWritetRequireWrite",
rule: AccessWrite,
required: AccessWrite,
expected: Allow,
},
{
name: "RuleWritetRequireList",
rule: AccessWrite,
required: AccessList,
expected: Allow,
},
{
name: "RuleDenyRequireRead",
rule: AccessDeny,
required: AccessRead,
expected: Deny,
},
{
name: "RuleDenyRequireWrite",
rule: AccessDeny,
required: AccessWrite,
expected: Deny,
},
{
name: "RuleDenyRequireList",
rule: AccessDeny,
required: AccessList,
expected: Deny,
},
}
for _, tcase := range tests {
t.Run(tcase.name, func(t *testing.T) {
require.Equal(t, tcase.expected, enforce(tcase.rule, tcase.required))
})
}
}
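
// TestACL_ReadAll verifies the wildcard read checks: only an empty-name
// prefix rule can grant read access over all nodes or services; an
// exact-match rule for a single name never does.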
func TestACL_ReadAll(t *testing.T) {
type testcase struct {
name string
rules string
check func(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext)
}
tests := []testcase{
{
name: "node:bar:read",
rules: `node "bar" { policy = "read" }`,
check: checkDenyNodeReadAll,
},
{
name: "node:bar:write",
rules: `node "bar" { policy = "write" }`,
check: checkDenyNodeReadAll,
},
{
name: "node:*:read",
rules: `node_prefix "" { policy = "read" }`,
check: checkAllowNodeReadAll,
},
{
name: "node:*:write",
rules: `node_prefix "" { policy = "write" }`,
check: checkAllowNodeReadAll,
},
{
name: "service:bar:read",
rules: `service "bar" { policy = "read" }`,
check: checkDenyServiceReadAll,
},
{
name: "service:bar:write",
rules: `service "bar" { policy = "write" }`,
check: checkDenyServiceReadAll,
},
{
name: "service:*:read",
rules: `service_prefix "" { policy = "read" }`,
check: checkAllowServiceReadAll,
},
{
name: "service:*:write",
rules: `service_prefix "" { policy = "write" }`,
check: checkAllowServiceReadAll,
},
}
body := func(t *testing.T, rules string, defaultPolicy Authorizer, check func(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext)) {
t.Helper()
policy, err := NewPolicyFromSource(rules, SyntaxCurrent, nil, nil)
require.NoError(t, err)
acl, err := NewPolicyAuthorizerWithDefaults(defaultPolicy, []*Policy{policy}, nil)
require.NoError(t, err)
check(t, acl, "", nil)
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Run("default deny", func(t *testing.T) {
body(t, tc.rules, DenyAll(), tc.check)
})
t.Run("default allow", func(t *testing.T) {
body(t, tc.rules, AllowAll(), checkAllowNodeReadAll)
})
})
}
}
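
// A compact sketch of the rule-precedence behavior covered by the
// ExactMatchPrecedence table above, written against the HCL form used by
// TestACL_ReadAll: an exact-match rule beats any prefix rule, and among
// prefix rules the longest match wins. The test name is illustrative and
// not part of the original suite.
func TestRulePrecedenceSketch(t *testing.T) {
	policy, err := NewPolicyFromSource(`
		key "app/config" { policy = "write" }
		key_prefix "app/" { policy = "read" }
		key_prefix "" { policy = "deny" }
	`, SyntaxCurrent, nil, nil)
	require.NoError(t, err)

	authz, err := NewPolicyAuthorizerWithDefaults(DenyAll(), []*Policy{policy}, nil)
	require.NoError(t, err)

	// The exact-match rule beats the shorter "app/" prefix rule.
	checkAllowKeyWrite(t, authz, "app/config", nil)
	// Longest prefix wins: "app/" (read) beats "" (deny).
	checkAllowKeyRead(t, authz, "app/other", nil)
	checkDenyKeyWrite(t, authz, "app/other", nil)
	// Keys outside "app/" fall through to the empty-prefix deny rule.
	checkDenyKeyRead(t, authz, "web/config", nil)
}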