open-consul/agent/consul/acl_endpoint.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package consul

import (
	"context"
	"errors"
	"fmt"
"os"
"path/filepath"
2014-08-08 23:00:32 +00:00
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
2014-08-08 23:00:32 +00:00
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/consul/auth"
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
"github.com/hashicorp/consul/lib"
2014-08-06 00:05:59 +00:00
)

const (
	// aclBootstrapReset is the file name to create in the data dir. Its only
	// contents should be the reset index.
	aclBootstrapReset = "acl-bootstrap-reset"
)

var ACLEndpointSummaries = []prometheus.SummaryDefinition{
	{
		Name: []string{"acl", "token", "clone"},
		Help: "",
	},
	{
		Name: []string{"acl", "token", "upsert"},
		Help: "",
	},
	{
		Name: []string{"acl", "token", "delete"},
		Help: "",
	},
	{
		Name: []string{"acl", "policy", "upsert"},
		Help: "",
	},
	{
		Name: []string{"acl", "policy", "delete"},
		Help: "",
	},
	{
		Name: []string{"acl", "role", "upsert"},
		Help: "",
	},
	{
		Name: []string{"acl", "role", "delete"},
		Help: "",
	},
	{
		Name: []string{"acl", "bindingrule", "upsert"},
		Help: "",
	},
	{
		Name: []string{"acl", "bindingrule", "delete"},
		Help: "",
	},
	{
		Name: []string{"acl", "authmethod", "upsert"},
		Help: "",
	},
	{
		Name: []string{"acl", "authmethod", "delete"},
		Help: "",
	},
	{
		Name: []string{"acl", "login"},
		Help: "",
	},
	{
		Name: []string{"acl", "logout"},
		Help: "",
	},
}
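
// These summary definitions are wired into telemetry at agent startup. As a
// rough sketch only (assuming go-metrics' prometheus sink; this is not the
// wiring used by this package), they could be registered like so:
//
//	sink, err := prometheus.NewPrometheusSinkFrom(prometheus.PrometheusOpts{
//		SummaryDefinitions: ACLEndpointSummaries,
//	})
//	if err == nil {
//		// Install as the process-wide go-metrics sink.
//		metrics.NewGlobal(metrics.DefaultConfig("consul"), sink)
//	}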

// ACL endpoint is used to manipulate ACLs
type ACL struct {
	srv    *Server
	logger hclog.Logger
}

// fileBootstrapResetIndex retrieves the reset index specified by the administrator from
// the file on disk.
//
// Q: What is the bootstrap reset index?
// A: If you happen to lose access to all tokens capable of ACL management you need a way
// to get back into your system. This allows an admin to write the current
// bootstrap "index" into a special file on disk to override the mechanism preventing
// a second token bootstrap. The index will be retrieved by an API call to /v1/acl/bootstrap.
// When already bootstrapped, this API will return the reset index necessary within
// the error response. Once set in the file, the bootstrap API can be used again to
// get a new token.
//
// Q: Why is the reset index not in the config?
// A: We want to be able to remove the reset index once we have used it. This prevents
// accidentally allowing bootstrapping yet again after a snapshot restore.
func (a *ACL) fileBootstrapResetIndex() uint64 {
	// Determine the file path to check
	path := filepath.Join(a.srv.config.DataDir, aclBootstrapReset)

	// Read the file
	raw, err := os.ReadFile(path)
	if err != nil {
		if !os.IsNotExist(err) {
			a.logger.Error("bootstrap: failed to read path",
				"path", path,
				"error", err,
			)
		}
		return 0
	}

	// Attempt to parse the file
	var resetIdx uint64
	if _, err := fmt.Sscanf(string(raw), "%d", &resetIdx); err != nil {
		a.logger.Error("failed to parse bootstrap reset index path", "path", path, "error", err)
		return 0
	}

	// Return the reset index
	a.logger.Debug("parsed bootstrap reset index path", "path", path, "reset_index", resetIdx)
	return resetIdx
}
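
// Example (a hedged sketch, not code from this package): the operator-side
// half of this workflow is just writing the reset index reported in the
// bootstrap error into the agent's data directory. A hypothetical helper:
//
//	// resetIdx comes from the "(reset index: N)" text in the bootstrap error.
//	path := filepath.Join(dataDir, "acl-bootstrap-reset")
//	err := os.WriteFile(path, []byte(fmt.Sprintf("%d", resetIdx)), 0600)
//
// The file body is parsed with Sscanf("%d") above, so a bare decimal index
// is all it should contain.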

func (a *ACL) removeBootstrapResetFile() {
	if err := os.Remove(filepath.Join(a.srv.config.DataDir, aclBootstrapReset)); err != nil {
		a.logger.Warn("failed to remove bootstrap file", "error", err)
	}
}

func (a *ACL) aclPreCheck() error {
	if !a.srv.config.ACLsEnabled {
		return acl.ErrDisabled
	}
	return nil
}

// BootstrapTokens is used to perform a one-time ACL bootstrap operation on
// a cluster to get the first management token.
func (a *ACL) BootstrapTokens(args *structs.ACLInitialTokenBootstrapRequest, reply *structs.ACLToken) error {
	if err := a.aclPreCheck(); err != nil {
		return err
	}
	if done, err := a.srv.ForwardRPC("ACL.BootstrapTokens", args, reply); done {
		return err
	}
	if err := a.srv.aclBootstrapAllowed(); err != nil {
		return err
	}

	// Verify we are allowed to serve this request
	if !a.srv.InPrimaryDatacenter() {
		return acl.ErrDisabled
	}

	// By doing some pre-checks we can head off later bootstrap attempts
	// without having to run them through Raft, which should curb abuse.
	state := a.srv.fsm.State()
	allowed, resetIdx, err := state.CanBootstrapACLToken()
	if err != nil {
		return err
	}

	var specifiedIndex uint64 = 0
	if !allowed {
		// Check if there is a reset index specified
		specifiedIndex = a.fileBootstrapResetIndex()
		if specifiedIndex == 0 {
			return fmt.Errorf("ACL bootstrap no longer allowed (reset index: %d)", resetIdx)
		} else if specifiedIndex != resetIdx {
			return fmt.Errorf("Invalid bootstrap reset index (specified %d, reset index: %d)", specifiedIndex, resetIdx)
		}
	}

	// Remove the bootstrap override file now that we have the index from it and it was valid.
	// Whether bootstrapping works or not is irrelevant: we really don't want this file hanging
	// around in case a snapshot restore is done, because then we don't want to accidentally
	// allow re-bootstrapping just because the file was unchanged.
	a.removeBootstrapResetFile()

	accessor, err := lib.GenerateUUID(a.srv.checkTokenUUID)
	if err != nil {
		return err
	}
	secret := args.BootstrapSecret
	if secret == "" {
		secret, err = lib.GenerateUUID(a.srv.checkTokenUUID)
		if err != nil {
			return err
		}
	} else {
		_, err = uuid.ParseUUID(secret)
		if err != nil {
			return err
		}
		ok, err := a.srv.checkTokenUUID(secret)
		if err != nil {
			return err
		}
		if !ok {
			return fmt.Errorf("Provided token cannot be used because a token with that secret already exists.")
		}
	}
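
	// For context, a hedged sketch (not code from this package) of how clients
	// typically reach this RPC through the HTTP API using the official Go
	// client (capi = github.com/hashicorp/consul/api):
	//
	//	client, _ := capi.NewClient(capi.DefaultConfig())
	//	token, _, err := client.ACL().Bootstrap()
	//
	// The returned token carries the AccessorID/SecretID assembled below.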
New ACLs (#4791) This PR is almost a complete rewrite of the ACL system within Consul. It brings the features more in line with other HashiCorp products. Obviously there is quite a bit left to do here but most of it is related docs, testing and finishing the last few commands in the CLI. I will update the PR description and check off the todos as I finish them over the next few days/week. Description At a high level this PR is mainly to split ACL tokens from Policies and to split the concepts of Authorization from Identities. A lot of this PR is mostly just to support CRUD operations on ACLTokens and ACLPolicies. These in and of themselves are not particularly interesting. The bigger conceptual changes are in how tokens get resolved, how backwards compatibility is handled and the separation of policy from identity which could lead the way to allowing for alternative identity providers. On the surface and with a new cluster the ACL system will look very similar to that of Nomads. Both have tokens and policies. Both have local tokens. The ACL management APIs for both are very similar. I even ripped off Nomad's ACL bootstrap resetting procedure. There are a few key differences though. Nomad requires token and policy replication where Consul only requires policy replication with token replication being opt-in. In Consul local tokens only work with token replication being enabled though. All policies in Nomad are globally applicable. In Consul all policies are stored and replicated globally but can be scoped to a subset of the datacenters. This allows for more granular access management. Unlike Nomad, Consul has legacy baggage in the form of the original ACL system. The ramifications of this are: A server running the new system must still support other clients using the legacy system. A client running the new system must be able to use the legacy RPCs when the servers in its datacenter are running the legacy system. The primary ACL DC's servers running in legacy mode needs to be a gate that keeps everything else in the entire multi-DC cluster running in legacy mode. So not only does this PR implement the new ACL system but has a legacy mode built in for when the cluster isn't ready for new ACLs. Also detecting that new ACLs can be used is automatic and requires no configuration on the part of administrators. This process is detailed more in the "Transitioning from Legacy to New ACL Mode" section below.
2018-10-19 16:04:07 +00:00
req := structs.ACLTokenBootstrapRequest{
Token: structs.ACLToken{
AccessorID: accessor,
SecretID: secret,
Description: "Bootstrap Token (Global Management)",
Policies: []structs.ACLTokenPolicyLink{
{
ID: structs.ACLPolicyGlobalManagementID,
},
},
2021-09-29 22:43:45 +00:00
CreateTime: time.Now(),
Local: false,
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
		ResetIndex: specifiedIndex,
	}

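	// Precompute the token's hash so later comparisons (e.g. change detection
	// during replication) don't have to rehash the token.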
	req.Token.SetHash(true)

	_, err = a.srv.raftApply(structs.ACLBootstrapRequestType, &req)
	if err != nil {
		return err
	}

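	// Read the newly written token back out of the local state store so the
	// reply reflects exactly what was committed.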
	if _, token, err := state.ACLTokenGetByAccessor(nil, accessor, structs.DefaultEnterpriseMetaInDefaultPartition()); err == nil {
		*reply = *token
	}

	a.logger.Info("ACL bootstrap completed")
	return nil
}

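// TokenRead returns the ACL token identified by args.TokenID, interpreted as
// either an AccessorID or a SecretID depending on args.TokenIDType. Reads by
// accessor redact the secret unless the caller has ACL write permission.
//
// A minimal client-side sketch (illustrative only; assumes an in-process
// server handle named srv):
//
//	args := structs.ACLTokenGetRequest{
//		Datacenter:  "dc1",
//		TokenID:     "<accessor-id>",
//		TokenIDType: structs.ACLTokenAccessor,
//	}
//	var out structs.ACLTokenResponse
//	err := srv.RPC(context.Background(), "ACL.TokenRead", &args, &out)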
func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLTokenResponse) error {
	if err := a.aclPreCheck(); err != nil {
		return err
	}

	if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
		return err
	}

	// Clients will not know whether the server has a local token store. In the
	// case where it doesn't, we will transparently forward requests.
	if !a.srv.LocalTokensEnabled() {
		args.Datacenter = a.srv.config.PrimaryDatacenter
	}

	if done, err := a.srv.ForwardRPC("ACL.TokenRead", args, reply); done {
		return err
	}

	var authz resolver.Result

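	// Reads by accessor ID must pass an ACL check below; reads by secret ID
	// are self-authorizing (holding the secret is sufficient), so no
	// authorizer is resolved for them.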
	if args.TokenIDType == structs.ACLTokenAccessor {
		var err error
		var authzContext acl.AuthorizerContext

		// Only ACLRead privileges are required to read a token by accessor.
		// However, if the caller does not also have ACLWrite, the token
		// secret will be redacted.
		if authz, err = a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
			return err
		} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
			return err
		}
	}
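
	// Serve the read as a blocking query: reply.QueryMeta reports the index,
	// and the memdb watch set wakes long-polling clients when the token changes.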
	return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			var index uint64
			var token *structs.ACLToken
			var err error

			if args.TokenIDType == structs.ACLTokenAccessor {
				index, token, err = state.ACLTokenGetByAccessor(ws, args.TokenID, &args.EnterpriseMeta)
				if token != nil {
					a.srv.filterACLWithAuthorizer(authz, &token)

					// token secret was redacted
					if token.SecretID == aclfilter.RedactedToken {
						reply.Redacted = true
					}
				}
			} else {
				index, token, err = state.ACLTokenGetBySecret(ws, args.TokenID, nil)
				// No extra validation is needed here. If you have the secret ID, you can read it.
			}

			if err != nil {
				return err
			}

			if token != nil && token.IsExpired(time.Now()) {
				return fmt.Errorf("token has expired: %w", acl.ErrNotFound)
			} else if token == nil {
				// token does not exist
				if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
					return fmt.Errorf("token not found in namespace %s: %w", ns, acl.ErrNotFound)
				}
				return fmt.Errorf("token does not exist: %w", acl.ErrNotFound)
			}
			reply.Index, reply.Token = index, token
			reply.SourceDatacenter = args.Datacenter

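			// Expanded reads also resolve everything contributing to the
			// token's effective authorization; see lookupExpandedTokenInfo.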
			if args.Expanded {
				info, err := a.lookupExpandedTokenInfo(ws, state, token)
				if err != nil {
					return err
				}
				reply.ExpandedTokenInfo = info
			}

			return nil
		})
}

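// lookupExpandedTokenInfo collects everything that contributes to a token's
// effective authorization: directly linked policies, roles (and their
// policies), synthetic policies derived from node and service identities, and
// any namespace default policies and roles.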
func (a *ACL) lookupExpandedTokenInfo(ws memdb.WatchSet, state *state.Store, token *structs.ACLToken) (structs.ExpandedTokenInfo, error) {
	policyIDs := make(map[string]struct{})
	roleIDs := make(map[string]struct{})
	identityPolicies := make(map[string]*structs.ACLPolicy)
	tokenInfo := structs.ExpandedTokenInfo{}

	// Add the token's policies and node/service identity policies
	for _, policy := range token.Policies {
		policyIDs[policy.ID] = struct{}{}
	}
	for _, roleLink := range token.Roles {
		roleIDs[roleLink.ID] = struct{}{}
	}
	for _, identity := range token.ServiceIdentities {
		policy := identity.SyntheticPolicy(&token.EnterpriseMeta)
		identityPolicies[policy.ID] = policy
	}
	for _, identity := range token.NodeIdentities {
		policy := identity.SyntheticPolicy(&token.EnterpriseMeta)
		identityPolicies[policy.ID] = policy
	}

	// Get any namespace default roles/policies to look up
	nsPolicies, nsRoles, err := getTokenNamespaceDefaults(ws, state, &token.EnterpriseMeta)
	if err != nil {
		return tokenInfo, err
	}
	tokenInfo.NamespaceDefaultPolicyIDs = nsPolicies
	tokenInfo.NamespaceDefaultRoleIDs = nsRoles
	for _, id := range nsPolicies {
		policyIDs[id] = struct{}{}
	}
	for _, id := range nsRoles {
		roleIDs[id] = struct{}{}
	}

	// Add each role's policies and node/service identity policies
	for roleID := range roleIDs {
		_, role, err := state.ACLRoleGetByID(ws, roleID, &token.EnterpriseMeta)
		if err != nil {
			return tokenInfo, err
		}
		if role == nil {
			continue
		}

		for _, policy := range role.Policies {
			policyIDs[policy.ID] = struct{}{}
		}
		for _, identity := range role.ServiceIdentities {
			policy := identity.SyntheticPolicy(&role.EnterpriseMeta)
			identityPolicies[policy.ID] = policy
		}
		for _, identity := range role.NodeIdentities {
			policy := identity.SyntheticPolicy(&role.EnterpriseMeta)
			identityPolicies[policy.ID] = policy
		}

		tokenInfo.ExpandedRoles = append(tokenInfo.ExpandedRoles, role)
	}

	var policies []*structs.ACLPolicy
	for id := range policyIDs {
		_, policy, err := state.ACLPolicyGetByID(ws, id, &token.EnterpriseMeta)
		if err != nil {
			return tokenInfo, err
		}
		if policy == nil {
			continue
		}
		policies = append(policies, policy)
	}
	for _, policy := range identityPolicies {
		policies = append(policies, policy)
	}
	tokenInfo.ExpandedPolicies = policies

	tokenInfo.AgentACLDefaultPolicy = a.srv.config.ACLResolverSettings.ACLDefaultPolicy
	tokenInfo.AgentACLDownPolicy = a.srv.config.ACLResolverSettings.ACLDownPolicy
	tokenInfo.ResolvedByAgent = a.srv.config.NodeName

	return tokenInfo, nil
}

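// TokenClone creates a copy of an existing ACL token. The caller must have
// ACL write permission.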
func (a *ACL) TokenClone(args *structs.ACLTokenSetRequest, reply *structs.ACLToken) error {
	if err := a.aclPreCheck(); err != nil {
		return err
	}

	if err := a.srv.validateEnterpriseRequest(&args.ACLToken.EnterpriseMeta, true); err != nil {
		return err
	}

	// Clients will not know whether the server has a local token store. In the
	// case where it doesn't, we will transparently forward requests.
	if !a.srv.LocalTokensEnabled() {
		args.Datacenter = a.srv.config.PrimaryDatacenter
	}

	if done, err := a.srv.ForwardRPC("ACL.TokenClone", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "token", "clone"}, time.Now())
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.ACLToken.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
_, token, err := a.srv.fsm.State().ACLTokenGetByAccessor(nil, args.ACLToken.AccessorID, &args.ACLToken.EnterpriseMeta)
if err != nil {
return err
} else if token == nil {
if ns := args.ACLToken.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
return fmt.Errorf("token not found in namespace %s: %w", ns, acl.ErrNotFound)
}
return fmt.Errorf("token does not exist: %w", acl.ErrNotFound)
} else if token.IsExpired(time.Now()) {
return fmt.Errorf("token is expired: %w", acl.ErrNotFound)
} else if !a.srv.InPrimaryDatacenter() && !token.Local {
// global token writes must be forwarded to the primary DC
args.Datacenter = a.srv.config.PrimaryDatacenter
return a.srv.forwardDC("ACL.TokenClone", a.srv.config.PrimaryDatacenter, args, reply)
}
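// Tokens minted through an auth method login cannot be cloned.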
if token.AuthMethod != "" {
return fmt.Errorf("Cannot clone a token created from an auth method")
}
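// Build the clone without Accessor/Secret IDs; fresh IDs are expected to be
// minted by the writer's Create call below (an assumption based on the
// create-without-ID pattern, not visible in this function).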
clone := &structs.ACLToken{
Policies: token.Policies,
Roles: token.Roles,
ServiceIdentities: token.ServiceIdentities,
NodeIdentities: token.NodeIdentities,
Local: token.Local,
Description: token.Description,
ExpirationTime: token.ExpirationTime,
EnterpriseMeta: args.ACLToken.EnterpriseMeta,
}
if args.ACLToken.Description != "" {
clone.Description = args.ACLToken.Description
}
updated, err := a.srv.aclTokenWriter().Create(clone, false)
if err == nil {
*reply = *updated
}
return err
}
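// TokenSet creates or updates an ACL token. An empty AccessorID (or an
// explicit args.Create) selects creation; otherwise the token identified by
// AccessorID is updated in place. Global tokens are always written in the
// primary datacenter; local tokens additionally require local tokens to be
// enabled in this datacenter.
//
// Minimal caller sketch (illustrative only; the codec and mgmtToken values
// are assumed to exist in the caller's context):
//
//	args := structs.ACLTokenSetRequest{
//	    Datacenter:   "dc1",
//	    ACLToken:     structs.ACLToken{Description: "example"},
//	    WriteRequest: structs.WriteRequest{Token: mgmtToken},
//	}
//	var out structs.ACLToken
//	err := msgpackrpc.CallWithCodec(codec, "ACL.TokenSet", &args, &out)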
func (a *ACL) TokenSet(args *structs.ACLTokenSetRequest, reply *structs.ACLToken) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.ACLToken.EnterpriseMeta, true); err != nil {
return err
}
// Global token creation/modification always goes to the ACL DC
if !args.ACLToken.Local {
args.Datacenter = a.srv.config.PrimaryDatacenter
} else if !a.srv.LocalTokensEnabled() {
return fmt.Errorf("Local tokens are disabled")
}
if done, err := a.srv.ForwardRPC("ACL.TokenSet", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "token", "upsert"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.ACLToken.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
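// An empty AccessorID or an explicit Create flag selects creation;
// otherwise the existing token is updated in place.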
var (
updated *structs.ACLToken
err error
)
writer := a.srv.aclTokenWriter()
if args.ACLToken.AccessorID == "" || args.Create {
updated, err = writer.Create(&args.ACLToken, false)
} else {
updated, err = writer.Update(&args.ACLToken)
}
if err == nil {
*reply = *updated
}
return err
}
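// TokenDelete removes a single token by its accessor ID. Deleting the
// anonymous token, or the token that authorizes the request itself, is
// rejected. Global tokens found in a secondary datacenter are forwarded to
// the primary for deletion.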
func (a *ACL) TokenDelete(args *structs.ACLTokenDeleteRequest, reply *string) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.TokenDelete", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "token", "delete"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
if _, err := uuid.ParseUUID(args.TokenID); err != nil {
return fmt.Errorf("Accessor ID is missing or an invalid UUID")
}
if args.TokenID == acl.AnonymousTokenID {
return fmt.Errorf("Delete operation not permitted on the anonymous token")
}
// grab the token here so we can invalidate our cache later on
_, token, err := a.srv.fsm.State().ACLTokenGetByAccessor(nil, args.TokenID, &args.EnterpriseMeta)
if err != nil {
return err
}
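// Three cases follow: the token exists here (forward to the primary if it
// is a global token in a secondary DC), it is missing in a secondary DC
// (try the primary anyway), or it is missing in the primary (not found).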
if token != nil {
if args.Token == token.SecretID {
return fmt.Errorf("Deletion of the request's authorization token is not permitted")
}
// No need to check expiration time because it's being deleted.
// token found in a secondary DC but it's not local, so it must be deleted in the primary
if !a.srv.InPrimaryDatacenter() && !token.Local {
args.Datacenter = a.srv.config.PrimaryDatacenter
return a.srv.forwardDC("ACL.TokenDelete", a.srv.config.PrimaryDatacenter, args, reply)
}
} else if !a.srv.InPrimaryDatacenter() {
// token not found in secondary DC - attempt to delete within the primary
args.Datacenter = a.srv.config.PrimaryDatacenter
return a.srv.forwardDC("ACL.TokenDelete", a.srv.config.PrimaryDatacenter, args, reply)
} else {
// in Primary Datacenter but the token does not exist - return early indicating it wasn't found.
if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
return fmt.Errorf("token not found in namespace %s: %w", ns, acl.ErrNotFound)
}
return fmt.Errorf("token does not exist: %w", acl.ErrNotFound)
}
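// Apply the delete through Raft as a single-element batch so every server
// removes the token from its state store consistently.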
req := &structs.ACLTokenBatchDeleteRequest{
TokenIDs: []string{args.TokenID},
}
_, err = a.srv.raftApply(structs.ACLTokenDeleteRequestType, req)
if err != nil {
return fmt.Errorf("Failed to apply token delete request: %v", err)
}
// Purge the identity from the cache to prevent using the previous definition of the identity
a.srv.ACLResolver.cache.RemoveIdentityWithSecretToken(token.SecretID)
if reply != nil {
*reply = token.AccessorID
}
return nil
}
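// TokenList returns stubs for the ACL tokens the caller is authorized to
// read. Expired tokens are omitted and results are served as a blocking
// query so callers can watch for changes.
//
// A minimal client-side sketch, assuming the standard
// github.com/hashicorp/consul/api client (not part of this file):
//
//	stubs, _, err := client.ACL().TokenList(nil)
//	if err != nil {
//		// handle error
//	}
//	for _, stub := range stubs {
//		fmt.Println(stub.AccessorID, stub.Description)
//	}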
func (a *ACL) TokenList(args *structs.ACLTokenListRequest, reply *structs.ACLTokenListResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
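// When this datacenter cannot serve local tokens, forward the listing to
// the primary and, if the request originally targeted another datacenter,
// restrict it to global tokens.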
if !a.srv.LocalTokensEnabled() {
if args.Datacenter != a.srv.config.PrimaryDatacenter {
args.Datacenter = a.srv.config.PrimaryDatacenter
args.IncludeLocal = false
args.IncludeGlobal = true
}
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.TokenList", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
var requestMeta acl.EnterpriseMeta
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &requestMeta, &authzContext)
if err != nil {
return err
}
// merge the token default meta into the request's meta
args.EnterpriseMeta.Merge(&requestMeta)
args.EnterpriseMeta.FillAuthzContext(&authzContext)
if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
var methodMeta *acl.EnterpriseMeta
if args.AuthMethod != "" {
methodMeta = args.ACLAuthMethodEnterpriseMeta.ToEnterpriseMeta()
// attempt to merge in the overall meta; wildcards will not be merged
methodMeta.MergeNoWildcard(&args.EnterpriseMeta)
// in the event that the meta above didn't merge due to being a wildcard,
// we ensure that proper token-based meta inference occurs
methodMeta.Merge(&requestMeta)
}
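// Serve the listing as a blocking query so the watch set fires when the
// underlying ACL token table changes.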
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, s *state.Store) error {
index, tokens, err := s.ACLTokenListWithParameters(ws, state.ACLTokenListParameters{
Local: args.IncludeLocal,
Global: args.IncludeGlobal,
Policy: args.Policy,
Role: args.Role,
MethodName: args.AuthMethod,
ServiceName: args.ServiceName,
MethodMeta: methodMeta,
EnterpriseMeta: &args.EnterpriseMeta,
})
if err != nil {
return err
}
now := time.Now()
stubs := make([]*structs.ACLTokenListStub, 0, len(tokens))
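// Skip tokens that have already expired so the listing only reflects
// usable tokens.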
for _, token := range tokens {
if token.IsExpired(now) {
continue
}
stubs = append(stubs, token.Stub())
}
// filter down to just the tokens that the requester has permissions to read
a.srv.filterACLWithAuthorizer(authz, &stubs)
reply.Index, reply.Tokens = index, stubs
return nil
})
}
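// TokenBatchRead returns full token objects for a batch of accessor IDs.
// It is used for replication, so expired tokens are not filtered out;
// secret IDs are redacted when the caller lacks permission to read them.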
func (a *ACL) TokenBatchRead(args *structs.ACLTokenBatchGetRequest, reply *structs.ACLTokenBatchResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.TokenBatchRead", args, reply); done {
return err
}
authz, err := a.srv.ResolveToken(args.Token)
if err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, tokens, err := state.ACLTokenBatchGet(ws, args.AccessorIDs)
if err != nil {
return err
}
// This RPC is used for replication, so don't filter out expired tokens here.
// Filter the tokens down to just what we have permission to see - also redact
// secrets based on allowed permissions. We could just call filterACLWithAuthorizer
// on the whole token list but then it would require another pass through the token
list to determine if any secrets were redacted. It's a small amount of code to
// process the loop so it was duplicated here and we instead call the filter func
// with just a single token.
ret := make(structs.ACLTokens, 0, len(tokens))
for _, token := range tokens {
final := token
a.srv.filterACLWithAuthorizer(authz, &final)
if final != nil {
ret = append(ret, final)
if final.SecretID == aclfilter.RedactedToken {
reply.Redacted = true
}
} else {
reply.Removed = true
}
}
reply.Index, reply.Tokens = index, ret
return nil
})
}
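// PolicyRead returns a single ACL policy by ID.
//
// A minimal client-side sketch, assuming the standard
// github.com/hashicorp/consul/api client (not part of this file):
//
//	policy, _, err := client.ACL().PolicyRead(policyID, nil)
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(policy.Name, policy.Rules)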
func (a *ACL) PolicyRead(args *structs.ACLPolicyGetRequest, reply *structs.ACLPolicyResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.PolicyRead", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var (
index uint64
policy *structs.ACLPolicy
err error
)
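// Look the policy up by ID when one is provided; otherwise fall back
// to a lookup by name.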
if args.PolicyID != "" {
index, policy, err = state.ACLPolicyGetByID(ws, args.PolicyID, &args.EnterpriseMeta)
} else {
index, policy, err = state.ACLPolicyGetByName(ws, args.PolicyName, &args.EnterpriseMeta)
}
if err != nil {
return err
}
reply.Index, reply.Policy = index, policy
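// errNotFound is understood by blockingQuery, which lets callers block
// on a policy that does not exist yet while still receiving the index.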
if policy == nil {
return errNotFound
}
return nil
})
}
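
// PolicyBatchRead returns the ACL policies matching a batch of policy IDs.
// Rather than requiring ACL read permission up front, the result set is
// filtered down to what the caller's token is allowed to see.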
func (a *ACL) PolicyBatchRead(args *structs.ACLPolicyBatchGetRequest, reply *structs.ACLPolicyBatchResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.PolicyBatchRead", args, reply); done {
return err
}
authz, err := a.srv.ResolveToken(args.Token)
if err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, policies, err := state.ACLPolicyBatchGet(ws, args.PolicyIDs)
if err != nil {
return err
}
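// Filter the fetched policies down to those readable by the caller's token.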
a.srv.filterACLWithAuthorizer(authz, &policies)
reply.Index, reply.Policies = index, policies
return nil
})
}
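
// PolicySet creates or updates an ACL policy. Writes are always applied in
// the primary datacenter and require ACL write permission.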
func (a *ACL) PolicySet(args *structs.ACLPolicySetRequest, reply *structs.ACLPolicy) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.Policy.EnterpriseMeta, true); err != nil {
return err
}
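// Policy writes must happen in the primary datacenter, so rewrite the
// target datacenter and let ForwardRPC route the request there.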
if !a.srv.InPrimaryDatacenter() {
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.PolicySet", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "policy", "upsert"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.Policy.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
policy := &args.Policy
state := a.srv.fsm.State()
// Almost all of the checks here are also done in the state store. However,
// we want to prevent the raft operations when we know they are going to fail
// so we still do them here.
// ensure a name is set
if policy.Name == "" {
return fmt.Errorf("Invalid Policy: no Name is set")
}
before trimming * trimDomain: ensure domain trimmed without modyfing original domains * update changelog --------- Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * deps: aws-sdk-go v1.44.289 (#17876) Signed-off-by: Dan Bond <danbond@protonmail.com> * api-gateway: add operation cannot be fulfilled error to common errors (#17874) * add error message * Update website/content/docs/api-gateway/usage/errors.mdx Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * fix formating issues --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * api-gateway: add step to upgrade instructions for creating intentions (#17875) * Changelog - add 1.13.9, 1.14.8, and 1.15.4 (#17889) * docs: update config enable_debug (#17866) * update doc for config enable_debug * Update website/content/docs/agent/config/config-files.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update wording on WAN fed and intermediate_pki_path (#17850) * Allow service identity tokens the ability to read jwt-providers (#17893) * Allow service identity tokens the ability to read jwt-providers * more tests * service_prefix tests * Update docs (#17476) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add emit_tags_as_labels to envoy bootstrap config when using Consul Telemetry Collector (#17888) * Fix command from kg to kubectl get (#17903) * Create and update release notes for 1.16 and 1.2 (#17895) * update release notes for 1.16 and 1.2 * update latest consul core release * Propose new changes to APIgw upgrade instructions (#17693) * Propose new changes to APIgw upgrade instructions * fix build error * update callouts to render correctly * Add hideClipboard to log messages * Added clarification around consul k8s and crds * Add workflow to verify linux release packages (#17904) * adding docker files to verify linux packages. * add verifr-release-linux.yml * updating name * pass inputs directly into jobs * add other linux package platforms * remove on push * fix TARGETARCH on debian and ubuntu so it can check arm64 and amd64 * fixing amazon to use the continue line * add ubuntu i386 * fix comment lines * working * remove commented out workflow jobs * Apply suggestions from code review Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * update fedora and ubuntu to use latest tag --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * Reference hashicorp/consul instead of consul for Docker image (#17914) * Reference hashicorp/consul instead of consul for Docker image * Update Make targets that pull consul directly * Update Consul K8s Upgrade Doc Updates (#17921) Updating upgrade procedures to encompass expected errors during upgrade process from v1.13.x to v1.14.x. * Update sameness-group.mdx (#17915) * Update create-sameness-groups.mdx (#17927) * deps: coredns v1.10.1 (#17912) * Ensure RSA keys are at least 2048 bits in length (#17911) * Ensure RSA keys are at least 2048 bits in length * Add changelog * update key length check for FIPS compliance * Fix no new variables error and failing to return when error exists from validating * clean up code for better readability * actually return value * tlsutil: Fix check TLS configuration (#17481) * tlsutil: Fix check TLS configuration * Rewording docs. 
* Update website/content/docs/services/configuration/checks-configuration-reference.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Fix typos and add changelog entry. --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Deprecations for connect-native SDK and specific connect native APIs (#17937) * Update v1_16_x.mdx * Update connect native golang page --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Revert "Add workflow to verify linux release packages (#17904)" (#17942) This reverts commit 3368f14fab500ebe9f6aeab5631dd1d5f5a453e5. * Fixes Secondary ConnectCA update (#17846) This fixes a bug that was identified which resulted in subsequent ConnectCA configuration update not to persist in the cluster. * fixing typo in link to jwt-validations-with-intentions doc (#17955) * Fix streaming backend link (#17958) * Fix streaming backend link * Update health.mdx * Dynamically create jwks clusters for jwt-providers (#17944) * website: remove deprecated agent rpc docs (#17962) * Fix missing BalanceOutboundConnections in v2 catalog. (#17964) * feature - [NET - 4005] - [Supportability] Reloadable Configuration - enable_debug (#17565) * # This is a combination of 9 commits. # This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same * # This is a combination of 12 commits. 
# This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same # This is the commit message #10: made enable debug atomic bool # This is the commit message #11: fix lint # This is the commit message #12: fix test true enable debug * parent 10f500e895d92cc3691ade7b74a33db755d22039 author absolutelightning <ashesh.vidyut@hashicorp.com> 1687352587 +0530 committer absolutelightning <ashesh.vidyut@hashicorp.com> 1687352592 +0530 init without tests change log fix tests fix tests added tests change log breaking change removed breaking change fix test keeping the test behaviour same made enable debug atomic bool fix lint fix test true enable debug using enable debug in agent as atomic bool test fixes fix tests fix tests added update on correct locaiton fix tests fix reloadable config enable debug fix tests fix init and acl 403 * revert commit * Fix formatting codeblocks on APIgw docs (#17970) * fix formatting codeblocks * remove unnecessary indents * Remove POC code (#17974) * update doc (#17910) * update doc * update link * Remove duplicate and unused newDecodeConfigEntry func (#17979) * docs: samenessGroup YAML examples (#17984) * configuration entry syntax * Example config * Add changelog entry for 1.16.0 (#17987) * Fix typo (#17198) servcies => services * Expose JWKS cluster config through JWTProviderConfigEntry (#17978) * Expose JWKS cluster config through JWTProviderConfigEntry * fix typos, rename trustedCa to trustedCA * Integration test for ext-authz Envoy extension (#17980) * Fix incorrect protocol for transparent proxy upstreams. (#17894) This PR fixes a bug that was introduced in: https://github.com/hashicorp/consul/pull/16021 A user setting a protocol in proxy-defaults would cause tproxy implicit upstreams to not honor the upstream service's protocol set in its `ServiceDefaults.Protocol` field, and would instead always use the proxy-defaults value. Due to the fact that upstreams configured with "tcp" can successfully contact upstream "http" services, this issue was not recognized until recently (a proxy-defaults with "tcp" and a listening service with "http" would make successful requests, but not the opposite). As a temporary work-around, users experiencing this issue can explicitly set the protocol on the `ServiceDefaults.UpstreamConfig.Overrides`, which should take precedence. The fix in this PR removes the proxy-defaults protocol from the wildcard upstream that tproxy uses to configure implicit upstreams. When the protocol was included, it would always overwrite the value during discovery chain compilation, which was not correct. The discovery chain compiler also consumes proxy defaults to determine the protocol, so simply excluding it from the wildcard upstream config map resolves the issue. 
* feat: include nodes count in operator usage endpoint and cli command (#17939) * feat: update operator usage api endpoint to include nodes count * feat: update operator usange cli command to includes nodes count * [OSS] Improve Gateway Test Coverage of Catalog Health (#18011) * fix(cli): remove failing check from 'connect envoy' registration for api gateway * test(integration): add tests to check catalog statsus of gateways on startup * remove extra sleep comment * Update test/integration/consul-container/libs/assert/service.go * changelog * Fixes Traffic rate limitting docs (#17997) * Fix removed service-to-service peering links (#17221) * docs: fix removed service-to-service peering links * docs: extend peering-via-mesh-gateways intro (thanks @trujillo-adam) --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Sameness "beta" warning (#18017) * Warning updates * .x * updated typo in tab heading (#18022) * updated typo in tab heading * updated tab group typo, too * Document that DNS lookups can target cluster peers (#17990) Static DNS lookups, in addition to explicitly targeting a datacenter, can target a cluster peer. This was added in 95dc0c7b301b70a6b955a8b7c9737c9b86f03df6 but didn't make the documentation. The driving function for the change is `parseLocality` here: https://github.com/hashicorp/consul/blob/0b1299c28d8127129d61310ee4280055298438e0/agent/dns_oss.go#L25 The biggest change in this is to adjust the standard lookup syntax to tie `.<datacenter>` to `.dc` as required-together, and to append in the similar `.<cluster-peer>.peer` optional argument, both to A record and SRV record lookups. Co-authored-by: David Yu <dyu@hashicorp.com> * Add first integration test for jwt auth with intention (#18005) * fix stand-in text for name field (#18030) * removed sameness conf entry from failover nav (#18033) * docs - add service sync annotations and k8s service weight annotation (#18032) * Docs for https://github.com/hashicorp/consul-k8s/pull/2293 * remove versions for enterprise features since they are old --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * docs - add jobs use case for service mesh k8s (#18037) * docs - add jobs use case for service mesh k8s * add code blocks * address feedback (#18045) * Add verify server hostname to tls default (#17155) * [OSS] Fix initial_fetch_timeout to wait for all xDS resources (#18024) * fix(connect): set initial_fetch_time to wait indefinitely * changelog * PR feedback 1 * ui: fix typos for peer service imports (#17999) * test: fix FIPS inline cert test message (#18076) * Fix a couple typos in Agent Telemetry Metrics docs (#18080) * Fix metrics docs * Add changelog Signed-off-by: josh <josh.timmons@hashicorp.com> --------- Signed-off-by: josh <josh.timmons@hashicorp.com> * docs updates - cluster peering and virtual services (#18069) * Update route-to-virtual-services.mdx * Update establish-peering.mdx * Update service-mesh-compare.mdx (#17279) grammar change * Update helm docs on main (#18085) * ci: use gotestsum v1.10.1 [NET-4042] (#18088) * Docs: Update proxy lifecycle annotations and consul-dataplane flags (#18075) * Update proxy lifecycle annotations and consul-dataplane flags * Pass configured role name to Vault for AWS auth in Connect CA (#17885) * Docs for dataplane upgrade on k8s (#18051) * Docs for dataplane upgrade on k8s --------- Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs - 
update upgrade index page to not recommend consul leave. (#18100) * Displays Consul version of each nodes in UI nodes section (#17754) * update UINodes and UINodeInfo response with consul-version info added as NodeMeta, fetched from serf members * update test cases TestUINodes, TestUINodeInfo * added nil check for map * add consul-version in local agent node metadata * get consul version from serf member and add this as node meta in catalog register request * updated ui mock response to include consul versions as node meta * updated ui trans and added version as query param to node list route * updates in ui templates to display consul version with filter and sorts * updates in ui - model class, serializers,comparators,predicates for consul version feature * added change log for Consul Version Feature * updated to get version from consul service, if for some reason not available from serf * updated changelog text * updated dependent testcases * multiselection version filter * Update agent/consul/state/catalog.go comments updated Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --------- Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * api gw 1.16 updates (#18081) * api gw 1.16 updates * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * update CodeBlockConfig filename * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * remove non-standard intentions page * Update website/content/docs/api-gateway/configuration/index.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [NET-4103] ci: build s390x (#18067) * ci: build s390x * ci: test s390x * ci: dev build s390x * no GOOS * ent only * build: publish s390x * fix syntax error * fix syntax error again * fix syntax error again x2 * test branch * Move s390x conditionals to step level * remove test branch --------- Co-authored-by: emilymianeil <eneil@hashicorp.com> * :ermahgerd "Sevice Mesh" -> "Service Mesh" (#18116) Just a typo in the docs. * Split pbmesh.UpstreamsConfiguration as a resource out of pbmesh.Upstreams (#17991) Configuration that previously was inlined into the Upstreams resource applies to both explicit and implicit upstreams and so it makes sense to split it out into its own resource. It also has other minor changes: - Renames `proxy.proto` proxy_configuration.proto` - Changes the type of `Upstream.destination_ref` from `pbresource.ID` to `pbresource.Reference` - Adds comments to fields that didn't have them * [NET-4895] ci - api tests and consul container tests error because of dependency bugs with go 1.20.6. Pin go to 1.20.5. (#18124) ### Description The following jobs started failing when go 1.20.6 was released: - `go-test-api-1-19` - `go-test-api-1-20` - `compatibility-integration-tests` - `upgrade-integration-tests` `compatibility-integration-tests` and `compatibility-integration-tests` to this testcontainers issue: https://github.com/testcontainers/testcontainers-go/issues/1359. This issue calls for testcontainers to release a new version when one of their dependencies is fixed. 
When that is done, we will unpin the go versions in `compatibility-integration-tests` and `compatibility-integration-tests`. ### Testing & Reproduction steps See these jobs broken in CI and then see them work with this PR. --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * Add ingress gateway deprecation notices to docs (#18102) ### Description This adds notices, that ingress gateway is deprecated, to several places in the product docs where ingress gateway is the topic. ### Testing & Reproduction steps Tested with a local copy of the website. ### Links Deprecation of ingress gateway was announced in the Release Notes for Consul 1.16 and Consul-K8s 1.2. See: [https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated ) [https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated) ### PR Checklist * [N/A] updated test coverage * [X] external facing docs updated * [X] appropriate backport labels added * [X] not a security concern --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add docs for jwt cluster configuration (#18004) ### Description <!-- Please describe why you're making this change, in plain English. --> - Add jwt-provider docs for jwks cluster configuration. The configuration was added here: https://github.com/hashicorp/consul/pull/17978 * Docs: fix unmatched bracket for health checks page (#18134) * NET-4657/add resource service client (#18053) ### Description <!-- Please describe why you're making this change, in plain English. --> Dan had already started on this [task](https://github.com/hashicorp/consul/pull/17849) which is needed to start building the HTTP APIs. This just needed some cleanup to get it ready for review. Overview: - Rename `internalResourceServiceClient` to `insecureResourceServiceClient` for name consistency - Configure a `secureResourceServiceClient` with auth enabled ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Fix bug with Vault CA provider (#18112) Updating RootPKIPath but not IntermediatePKIPath would not update leaf signing certs with the new root. Unsure if this happens in practice but manual testing showed it is a bug that would break mesh and agent connections once the old root is pruned. * [NET-4897] net/http host header is now verified and request.host that contains socked now error (#18129) ### Description This is related to https://github.com/hashicorp/consul/pull/18124 where we pinned the go versions in CI to 1.20.5 and 1.19.10. go 1.20.6 and 1.19.11 now validate request host headers for validity, including the hostname cannot be prefixed with slashes. For local communications (npipe://, unix://), the hostname is not used, but we need valid and meaningful hostname. Prior versions go Go would clean the host header, and strip slashes in the process, but go1.20.6 and go1.19.11 no longer do, and reject the host header. Around the community we are seeing that others are intercepting the req.host and if it starts with a slash or ends with .sock, they changing the host to localhost or another dummy value. 
[client: define a "dummy" hostname to use for local connections by thaJeztah · Pull Request #45942 · moby/moby](https://github.com/moby/moby/pull/45942) ### Testing & Reproduction steps Check CI tests. ### Links * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * add a conditional around setting LANFilter.AllSegments to make sure it is valid (#18139) ### Description This is to correct a code problem because this assumes all segments, but when you get to Enterprise, you can be in partition that is not the default partition, in which case specifying all segments does not validate and fails. This is to correct the setting of this filter with `AllSegments` to `true` to only occur when in the the `default` partition. ### Testing & Reproduction steps <!-- * In the case of bugs, describe how to replicate * If any manual tests were done, document the steps and the conditions to replicate * Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding --> ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> ### PR Checklist * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * chore: bump upgrade integrations tests to 1.15, 116 [NET-4743] (#18130) * re org resource type registry (#18133) * fix: update delegateMock used in ENT (#18149) ### Description <!-- Please describe why you're making this change, in plain English. --> The mock is used in `http_ent_test` file which caused lint failures. For OSS->ENT parity adding the same change here. ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> Identified in OSS->ENT [merge PR](https://github.com/hashicorp/consul-enterprise/pull/6328) ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062) ### Description <!-- Please describe why you're making this change, in plain English. --> - Currently the jwt-auth filter doesn't take into account the service identity when validating jwt-auth, it only takes into account the path and jwt provider during validation. This causes issues when multiple source intentions restrict access to an endpoint with different JWT providers. - To fix these issues, rather than use the JWT auth filter for validation, we use it in metadata mode and allow it to forward the successful validated JWT token payload to the RBAC filter which will make the decisions. This PR ensures requests with and without JWT tokens successfully go through the jwt-authn filter. The filter however only forwards the data for successful/valid tokens. On the RBAC filter level, we check the payload for claims and token issuer + existing rbac rules. 
### Testing & Reproduction steps <!-- * In the case of bugs, describe how to replicate * If any manual tests were done, document the steps and the conditions to replicate * Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding --> - This test covers a multi level jwt requirements (requirements at top level and permissions level). It also assumes you have envoy running, you have a redis and a sidecar proxy service registered, and have a way to generate jwks with jwt. I mostly use: https://www.scottbrady91.com/tools/jwt for this. - first write your proxy defaults ``` Kind = "proxy-defaults" name = "global" config { protocol = "http" } ``` - Create two providers ``` Kind = "jwt-provider" Name = "auth0" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjog....." } } ``` ``` Kind = "jwt-provider" Name = "okta" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjogW3...." } } ``` - add a service intention ``` Kind = "service-intentions" Name = "redis" JWT = { Providers = [ { Name = "okta" }, ] } Sources = [ { Name = "*" Permissions = [{ Action = "allow" HTTP = { PathPrefix = "/workspace" } JWT = { Providers = [ { Name = "okta" VerifyClaims = [ { Path = ["aud"] Value = "my_client_app" }, { Path = ["sub"] Value = "5be86359073c434bad2da3932222dabe" } ] }, ] } }, { Action = "allow" HTTP = { PathPrefix = "/" } JWT = { Providers = [ { Name = "auth0" }, ] } }] } ] ``` - generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with different claims than `/workspace` expects and 1 with correct claims - connect to your envoy (change service and address as needed) to view logs and potential errors. You can add: `-- --log-level debug` to see what data is being forwarded ``` consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502 ``` - Make the following requests: ``` curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v Successful request ``` ### TODO * [x] Update test coverage * [ ] update integration tests (follow-up PR) * [x] appropriate backport labels added * Support Consul Connect Envoy Command on Windows (#17694) ### Description Add support for consul connect envoy command on windows. This PR fixes the comments of PR - https://github.com/hashicorp/consul/pull/15114 ### Testing * Built consul.exe from this branch on windows and hosted here - [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Updated the [tutorial](https://developer.hashicorp.com/consul/tutorials/developer-mesh/consul-windows-workloads) and changed the `consul_url.default` value to [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Followed the steps in the tutorial and verified that everything is working as described. 
### PR Checklist * [x] updated test coverage * [ ] external facing docs updated * [x] appropriate backport labels added * [x] not a security concern --------- Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo <joseignaciolorenzo85@gmail.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * Change docs to say 168h instead of 7d for server_rejoin_age_max (#18154) ### Description Addresses https://github.com/hashicorp/consul/pull/17171#issuecomment-1636930705 * [OSS] test: improve xDS listener code coverage (#18138) test: improve xDS listener code coverage * Re-order expected/actual for assertContainerState in consul container tests (#18157) Re-order expected/actual, consul container tests * group and document make file (#17943) * group and document make file * Add `testing/deployer` (neé `consul-topology`) [NET-4610] (#17823) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> * [NET-4792] Add integrations tests for jwt-auth (#18169) * Add FIPS reference to consul enterprise docs (#18028) * Add FIPS reference to consul enterprise docs * Update website/content/docs/enterprise/index.mdx Co-authored-by: David Yu <dyu@hashicorp.com> * remove support for ecs client (fips) --------- Co-authored-by: David Yu <dyu@hashicorp.com> * add peering_commontopo tests [NET-3700] (#17951) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> Co-authored-by: NiniOak <anita.akaeze@hashicorp.com> * docs - remove Sentinel from enterprise features list (#18176) * Update index.mdx * Update kv.mdx * Update docs-nav-data.json * delete sentinel.mdx * Update redirects.js --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * [NET-4865] Bump golang.org/x/net to 0.12.0 (#18186) Bump golang.org/x/net to 0.12.0 While not necessary to directly address CVE-2023-29406 (which should be handled by using a patched version of Go when building), an accompanying change to HTTP/2 error handling does impact agent code. See https://go-review.googlesource.com/c/net/+/506995 for the HTTP/2 change. Bump this dependency across our submodules as well for the sake of potential indirect consumers of `x/net/http`. * Call resource mutate hook before validate hook (NET-4907) (#18178) * [NET-4865] security: Update Go version to 1.20.6 (#18190) Update Go version to 1.20.6 This resolves [CVE-2023-29406] (https://nvd.nist.gov/vuln/detail/CVE-2023-29406) for uses of the `net/http` standard library. Note that until the follow-up to #18124 is done, the version of Go used in those impacted tests will need to remain on 1.20.5. * Improve XDS test coverage: JWT auth edition (#18183) * Improve XDS test coverage: JWT auth edition more tests * test: xds coverage for jwt listeners --------- Co-authored-by: DanStough <dan.stough@hashicorp.com> * update readme.md (#18191) u[date readme.md * Update submodules to latest following 1.16.0 (#18197) Align all our internal use of submodules on the latest versions. * SEC-090: Automated trusted workflow pinning (2023-07-18) (#18174) Result of tsccr-helper -log-level=info -pin-all-workflows . 
Co-authored-by: hashicorp-tsccr[bot] <hashicorp-tsccr[bot]@users.noreply.github.com> * Fix Backport Assistant PR commenting (#18200) * Fix Backport Assistant failure PR commenting For general comments on a PR, it looks like you have to use the `/issue` endpoint rather than `/pulls`, which requires commit/other review-specific target details. This matches the endpoint used in `backport-reminder.yml`. * Remove Backport Reminder workflow This is noisy (even when adding multiple labels, individual comments per label are generated), and likely no longer needed: we haven't had this work in a long time due to an expired GH token, and we now have better automation for backport PR assignment. * resource: Pass resource to Write ACL hook instead of just resource Id [NET-4908] (#18192) * Explicitly enable WebSocket upgrades (#18150) This PR explicitly enables WebSocket upgrades in Envoy's UpgradeConfig for all proxy types. (API Gateway, Ingress, and Sidecar.) Fixes #8283 * docs: fix the description of client rpc (#18206) * NET-4804: Add dashboard for monitoring consul-k8s (#18208) * [OSS] Improve xDS Code Coverage - Clusters (#18165) test: improve xDS cluster code coverage * NET-4222 take config file consul container (#18218) Net 4222 take config file consul container * Envoy Integration Test Windows (#18007) * [CONSUL-395] Update check_hostport and Usage (#40) * [CONSUL-397] Copy envoy binary from Image (#41) * [CONSUL-382] Support openssl in unique test dockerfile (#43) * [CONSUL-405] Add bats to single container (#44) * [CONSUL-414] Run Prometheus Test Cases and Validate Changes (#46) * [CONSUL-410] Run Jaeger in Single container (#45) * [CONSUL-412] Run test-sds-server in single container (#48) * [CONSUL-408] Clean containers (#47) * [CONSUL-384] Rebase and sync fork (#50) * [CONSUL-415] Create Scenarios Troubleshooting Docs (#49) * [CONSUL-417] Update Docs Single Container (#51) * [CONSUL-428] Add Socat to single container (#54) * [CONSUL-424] Replace pkill in kill_envoy function (#52) * [CONSUL-434] Modify Docker run functions in Helper script (#53) * [CONSUL-435] Replace docker run in set_ttl_check_state & wait_for_agent_service_register functions (#55) * [CONSUL-438] Add netcat (nc) in the Single container Dockerfile (#56) * [CONSUL-429] Replace Docker run with Docker exec (#57) * [CONSUL-436] Curl timeout and run tests (#58) * [CONSUL-443] Create dogstatsd Function (#59) * [CONSUL-431] Update Docs Netcat (#60) * [CONSUL-439] Parse nc Command in function (#61) * [CONSUL-463] Review curl Exec and get_ca_root Func (#63) * [CONSUL-453] Docker hostname in Helper functions (#64) * [CONSUL-461] Test wipe volumes without extra cont (#66) * [CONSUL-454] Check ports in the Server and Agent containers (#65) * [CONSUL-441] Update windows dockerfile with version (#62) * [CONSUL-466] Review case-grpc Failing Test (#67) * [CONSUL-494] Review case-cfg-resolver-svc-failover (#68) * [CONSUL-496] Replace docker_wget & docker_curl (#69) * [CONSUL-499] Cleanup Scripts - Remove nanoserver (#70) * [CONSUL-500] Update Troubleshooting Docs (#72) * [CONSUL-502] Pull & Tag Envoy Windows Image (#73) * [CONSUL-504] Replace docker run in docker_consul (#76) * [CONSUL-505] Change admin_bind * [CONSUL-399] Update envoy to 1.23.1 (#78) * [CONSUL-510] Support case-wanfed-gw on Windows (#79) * [CONSUL-506] Update troubleshooting Documentation (#80) * [CONSUL-512] Review debug_dump_volumes Function (#81) * [CONSUL-514] Add zipkin to Docker Image (#82) * [CONSUL-515] Update Documentation (#83) * [CONSUL-529] Support 
case-consul-exec (#86) * [CONSUL-530] Update Documentation (#87) * [CONSUL-530] Update default consul version 1.13.3 * [CONSUL-539] Cleanup (#91) * [CONSUL-546] Scripts Clean-up (#92) * [CONSUL-491] Support admin_access_log_path value for Windows (#71) * [CONSUL-519] Implement mkfifo Alternative (#84) * [CONSUL-542] Create OS Specific Files for Envoy Package (#88) * [CONSUL-543] Create exec_supported.go (#89) * [CONSUL-544] Test and Build Changes (#90) * Implement os.DevNull * using mmap instead of disk files * fix import in exec-unix * fix nmap open too many arguemtn * go fmt on file * changelog file * fix go mod * Update .changelog/17694.txt Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * different mmap library * fix bootstrap json * some fixes * chocolatey version fix and image fix * using different library * fix Map funciton call * fix mmap call * fix tcp dump * fix tcp dump * windows tcp dump * Fix docker run * fix tests * fix go mod * fix version 16.0 * fix version * fix version dev * sleep to debug * fix sleep * fix permission issue * fix permission issue * fix permission issue * fix command * fix command * fix funciton * fix assert config entry status command not found * fix command not found assert_cert_has_cn * fix command not found assert_upstream_missing * fix command not found assert_upstream_missing_once * fix command not found get_upstream_endpoint * fix command not found get_envoy_public_listener_once * fix command not found * fix test cases * windows integration test workflow github * made code similar to unix using npipe * fix go.mod * fix dialing of npipe * dont wait * check size of written json * fix undefined n * running * fix dep * fix syntax error * fix workflow file * windows runner * fix runner * fix from json * fix runs on * merge connect envoy * fix cin path * build * fix file name * fix file name * fix dev build * remove unwanted code * fix upload * fix bin name * fix path * checkout current branch * fix path * fix tests * fix shell bash for windows sh files * fix permission of run-test.sh * removed docker dev * added shell bash for tests * fix tag * fix win=true * fix cd * added dev * fix variable undefined * removed failing tests * fix tcp dump image * fix curl * fix curl * tcp dump path * fix tcpdump path * fix curl * fix curl install * stop removing intermediate containers * fix tcpdump docker image * revert -rm * --rm=false * makeing docker image before * fix tcpdump * removed case consul exec * removed terminating gateway simple * comment case wasm * removed data dog * comment out upload coverage * uncomment case-consul-exec * comment case consul exec * if always * logs * using consul 1.17.0 * fix quotes * revert quotes * redirect to dev null * Revert version * revert consul connect * fix version * removed envoy connect * not using function * change log * docker logs * fix logs * restructure bad authz * rmeoved dev null * output * fix file descriptor * fix cacert * fix cacert * fix ca cert * cacert does not work in windows curl * fix func * removed docker logs * added sleep * fix tls * commented case-consul-exec * removed echo * retry docker consul * fix upload bin * uncomment consul exec * copying consul.exe to docker image * copy fix * fix paths * fix path * github workspace path * latest version * Revert "latest version" This reverts commit 5a7d7b82d9e7553bcb01b02557ec8969f9deba1d. 
* commented consul exec * added ssl revoke best effort * revert best effort * removed unused files * rename var name and change dir * windows runner * permission * needs setup fix * swtich to github runner * fix file path * fix path * fix path * fix path * fix path * fix path * fix build paths * fix tag * nightly runs * added matrix in github workflow, renamed files * fix job * fix matrix * removed brackes * from json * without using job matrix * fix quotes * revert job matrix * fix workflow * fix comment * added comment * nightly runs * removed datadog ci as it is already measured in linux one * running test * Revert "running test" This reverts commit 7013d15a23732179d18ec5d17336e16b26fab5d4. * pr comment fixes * running test now * running subset of test * running subset of test * job matrix * shell bash * removed bash shell * linux machine for job matrix * fix output * added cat to debug * using ubuntu latest * fix job matrix * fix win true * fix go test * revert job matrix --------- Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: joselo85 <joseignaciolorenzo85@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * fix typos and update ecs compat table (#18215) * fix typos and update ecs compat table * real info for the ecs compat matrix table * Update website/content/docs/ecs/compatibility.mdx Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * [OSS] proxystate: add proxystate protos (#18216) * proxystate: add proxystate protos to pbmesh and resolve imports and conflicts between message names * ci: don't verify s390x (#18224) * [CC-5718] Remove HCP token requirement during bootstrap (#18140) * [CC-5718] Remove HCP token requirement during bootstrap * Re-add error for loading HCP management token * Remove old comment * Add changelog entry * Remove extra validation line * Apply suggestions from code review Co-authored-by: lornasong <lornasong@users.noreply.github.com> --------- Co-authored-by: lornasong <lornasong@users.noreply.github.com> * [NET-4122] Doc guidance for federation with externalServers (#18207) Doc guidance for federation with externalServers Add guidance for proper configuration when joining to a secondary cluster using WAN fed with external servers also enabled. Also clarify federation requirements and fix formatting for an unrelated value. Update both the Helm chart reference (synced from `consul-k8s`, see hashicorp/consul-k8s#2583) and the docs on using `externalServers`. 
* [OSS] Improve xDS Code Coverage - Endpoints and Misc (#18222) test: improve xDS endpoints code coverage * Clarify license reporting timing and GDPR compliance (#18237) Add Alicia's edits to clarify log timing and other details * Fix Github Workflow File (#18241) * [CONSUL-382] Support openssl in unique test dockerfile (#43) * [CONSUL-405] Add bats to single container (#44) * [CONSUL-414] Run Prometheus Test Cases and Validate Changes (#46) * [CONSUL-410] Run Jaeger in Single container (#45) * [CONSUL-412] Run test-sds-server in single container (#48) * [CONSUL-408] Clean containers (#47) * [CONSUL-384] Rebase and sync fork (#50) * [CONSUL-415] Create Scenarios Troubleshooting Docs (#49) * [CONSUL-417] Update Docs Single Container (#51) * [CONSUL-428] Add Socat to single container (#54) * [CONSUL-424] Replace pkill in kill_envoy function (#52) * [CONSUL-434] Modify Docker run functions in Helper script (#53) * [CONSUL-435] Replace docker run in set_ttl_check_state & wait_for_agent_service_register functions (#55) * [CONSUL-438] Add netcat (nc) in the Single container Dockerfile (#56) * [CONSUL-429] Replace Docker run with Docker exec (#57) * [CONSUL-436] Curl timeout and run tests (#58) * [CONSUL-443] Create dogstatsd Function (#59) * [CONSUL-431] Update Docs Netcat (#60) * [CONSUL-439] Parse nc Command in function (#61) * [CONSUL-463] Review curl Exec and get_ca_root Func (#63) * [CONSUL-453] Docker hostname in Helper functions (#64) * [CONSUL-461] Test wipe volumes without extra cont (#66) * [CONSUL-454] Check ports in the Server and Agent containers (#65) * [CONSUL-441] Update windows dockerfile with version (#62) * [CONSUL-466] Review case-grpc Failing Test (#67) * [CONSUL-494] Review case-cfg-resolver-svc-failover (#68) * [CONSUL-496] Replace docker_wget & docker_curl (#69) * [CONSUL-499] Cleanup Scripts - Remove nanoserver (#70) * [CONSUL-500] Update Troubleshooting Docs (#72) * [CONSUL-502] Pull & Tag Envoy Windows Image (#73) * [CONSUL-504] Replace docker run in docker_consul (#76) * [CONSUL-505] Change admin_bind * [CONSUL-399] Update envoy to 1.23.1 (#78) * [CONSUL-510] Support case-wanfed-gw on Windows (#79) * [CONSUL-506] Update troubleshooting Documentation (#80) * [CONSUL-512] Review debug_dump_volumes Function (#81) * [CONSUL-514] Add zipkin to Docker Image (#82) * [CONSUL-515] Update Documentation (#83) * [CONSUL-529] Support case-consul-exec (#86) * [CONSUL-530] Update Documentation (#87) * [CONSUL-530] Update default consul version 1.13.3 * [CONSUL-539] Cleanup (#91) * [CONSUL-546] Scripts Clean-up (#92) * [CONSUL-491] Support admin_access_log_path value for Windows (#71) * [CONSUL-519] Implement mkfifo Alternative (#84) * [CONSUL-542] Create OS Specific Files for Envoy Package (#88) * [CONSUL-543] Create exec_supported.go (#89) * [CONSUL-544] Test and Build Changes (#90) * Implement os.DevNull * using mmap instead of disk files * fix import in exec-unix * fix nmap open too many arguemtn * go fmt on file * changelog file * fix go mod * Update .changelog/17694.txt Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * different mmap library * fix bootstrap json * some fixes * chocolatey version fix and image fix * using different library * fix Map funciton call * fix mmap call * fix tcp dump * fix tcp dump * windows tcp dump * Fix docker run * fix tests * fix go mod * fix version 16.0 * fix version * fix version dev * sleep to debug * fix sleep * fix permission issue * fix permission issue * fix permission issue * fix command * fix command * fix funciton * fix assert config 
entry status command not found * fix command not found assert_cert_has_cn * fix command not found assert_upstream_missing * fix command not found assert_upstream_missing_once * fix command not found get_upstream_endpoint * fix command not found get_envoy_public_listener_once * fix command not found * fix test cases * windows integration test workflow github * made code similar to unix using npipe * fix go.mod * fix dialing of npipe * dont wait * check size of written json * fix undefined n * running * fix dep * fix syntax error * fix workflow file * windows runner * fix runner * fix from json * fix runs on * merge connect envoy * fix cin path * build * fix file name * fix file name * fix dev build * remove unwanted code * fix upload * fix bin name * fix path * checkout current branch * fix path * fix tests * fix shell bash for windows sh files * fix permission of run-test.sh * removed docker dev * added shell bash for tests * fix tag * fix win=true * fix cd * added dev * fix variable undefined * removed failing tests * fix tcp dump image * fix curl * fix curl * tcp dump path * fix tcpdump path * fix curl * fix curl install * stop removing intermediate containers * fix tcpdump docker image * revert -rm * --rm=false * makeing docker image before * fix tcpdump * removed case consul exec * removed terminating gateway simple * comment case wasm * removed data dog * comment out upload coverage * uncomment case-consul-exec * comment case consul exec * if always * logs * using consul 1.17.0 * fix quotes * revert quotes * redirect to dev null * Revert version * revert consul connect * fix version * removed envoy connect * not using function * change log * docker logs * fix logs * restructure bad authz * rmeoved dev null * output * fix file descriptor * fix cacert * fix cacert * fix ca cert * cacert does not work in windows curl * fix func * removed docker logs * added sleep * fix tls * commented case-consul-exec * removed echo * retry docker consul * fix upload bin * uncomment consul exec * copying consul.exe to docker image * copy fix * fix paths * fix path * github workspace path * latest version * Revert "latest version" This reverts commit 5a7d7b82d9e7553bcb01b02557ec8969f9deba1d. * commented consul exec * added ssl revoke best effort * revert best effort * removed unused files * rename var name and change dir * windows runner * permission * needs setup fix * swtich to github runner * fix file path * fix path * fix path * fix path * fix path * fix path * fix build paths * fix tag * nightly runs * added matrix in github workflow, renamed files * fix job * fix matrix * removed brackes * from json * without using job matrix * fix quotes * revert job matrix * fix workflow * fix comment * added comment * nightly runs * removed datadog ci as it is already measured in linux one * running test * Revert "running test" This reverts commit 7013d15a23732179d18ec5d17336e16b26fab5d4. 
* pr comment fixes * running test now * running subset of test * running subset of test * job matrix * shell bash * removed bash shell * linux machine for job matrix * fix output * added cat to debug * using ubuntu latest * fix job matrix * fix win true * fix go test * revert job matrix * Fix tests --------- Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: joselo85 <joseignaciolorenzo85@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236) * Align build arch matrix with enterprise (#18235) Ensure that OSS remains in sync w/ Enterprise by aligning the format of arch matrix args for various build jobs. * Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes" (#18248) Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236)" This reverts commit a11dba710e6ce6f172c0fa6c9b61567cc1efffc8. * resource: Add scope to resource type registration [NET-4976] (#18214) Enables querying a resource type's registration to determine if a resource is cluster, partition, or partition and namespace scoped. * Fix some inconsistencies in jwt docs (#18234) * NET-1825: More new ACL token creation docs (#18063) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [CC-5719] Add support for builtin global-read-only policy * Add changelog * Add read-only to docs * Fix some minor issues. * Change from ReplaceAll to Sprintf * Change IsValidPolicy name to return an error instead of bool * Fix PolicyList test * Fix other tests * Apply suggestions from code review Co-authored-by: Paul Glass <pglass@hashicorp.com> * Fix state store test for policy list. * Fix naming issues * Update acl/validation.go Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * backport of commit d63fa5481dc02c6faae7cc2647b4073b3286af1d * backport of commit 3d099a6ed8ed10b6dc464c466cb1668914db8f08 --------- Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: Dan Bond <danbond@protonmail.com> Signed-off-by: josh <josh.timmons@hashicorp.com> Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> Co-authored-by: Ronald <roncodingenthusiast@users.noreply.github.com> Co-authored-by: Eric Haberkorn <erichaberkorn@gmail.com> Co-authored-by: Andrew Stucki <andrew.stucki@hashicorp.com> Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com> Co-authored-by: R.B. 
if err := acl.ValidatePolicyName(policy.Name); err != nil {
return err
}
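// Resolve any existing policies that could conflict with this request. The
// policy is looked up both by ID and by name: the ID match determines whether
// this is an update to an existing policy, and the name match is used to
// enforce name uniqueness in both the create and update paths below.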
var idMatch *structs.ACLPolicy
var nameMatch *structs.ACLPolicy
var err error
if policy.ID != "" {
if _, err := uuid.ParseUUID(policy.ID); err != nil {
return fmt.Errorf("Policy ID invalid UUID")
}
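// The leading nil argument to the state store getters below is a memdb
// watch set; none is needed here because these are point-in-time reads
// rather than blocking queries.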
_, idMatch, err = state.ACLPolicyGetByID(nil, policy.ID, nil)
if err != nil {
return fmt.Errorf("acl policy lookup by id failed: %v", err)
}
}
_, nameMatch, err = state.ACLPolicyGetByName(nil, policy.Name, &policy.EnterpriseMeta)
if err != nil {
return fmt.Errorf("acl policy lookup by name failed: %v", err)
}
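// Note that the name lookup is scoped to the policy's EnterpriseMeta, so
// name uniqueness is only enforced within a single namespace/partition in
// Enterprise builds (in OSS there is effectively one scope).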
if policy.ID == "" {
// With no policy ID provided, one will be generated.
var err error
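// lib.GenerateUUID retries until the supplied callback accepts a
// candidate; a.srv.checkPolicyUUID reports whether the generated ID is
// already taken in the state store, guarding against (unlikely) UUID
// collisions.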
policy.ID, err = lib.GenerateUUID(a.srv.checkPolicyUUID)
if err != nil {
return err
}
// validate the name is unique
if nameMatch != nil {
return fmt.Errorf("Invalid Policy: A Policy with Name %q already exists", policy.Name)
}
} else {
// Verify the policy exists
if idMatch == nil {
return fmt.Errorf("cannot find policy %s", policy.ID)
}
// Verify that the name isn't changing, or if it is, that the new name is not already in use
if idMatch.Name != policy.Name && nameMatch != nil {
return fmt.Errorf("Invalid Policy: A policy with name %q already exists", policy.Name)
}
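// Illustrative sketch (not part of this function): the branches above mean
// that create vs. update is selected purely by whether Policy.ID is set.
// The request type follows structs.ACLPolicySetRequest; the field values
// below are made-up examples.
//
//	// Create: leave ID empty and the server generates a UUID.
//	create := structs.ACLPolicySetRequest{
//		Policy: structs.ACLPolicy{
//			Name:  "node-read",
//			Rules: `node_prefix "" { policy = "read" }`,
//		},
//	}
//
//	// Update: set ID to an existing policy's UUID. Renaming succeeds only
//	// if no other policy already uses the new name.
//	update := create
//	update.Policy.ID = "8b1a4a79-..." // hypothetical existing ID (truncated)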
Backport of [CC-5719] Add support for builtin global-read-only policy into release/1.16.x (#18345) * [OSS] Post Consul 1.16 updates (#17606) * chore: update dev build to 1.17 * chore(ci): add nightly 1.16 test Drop the oldest and add the newest running release branch to nightly builds. * Add writeAuditRPCEvent to agent_oss (#17607) * Add writeAuditRPCEvent to agent_oss * fix the other diffs * backport change log * Add Envoy and Consul version constraints to Envoy extensions (#17612) * [API Gateway] Fix trust domain for external peered services in synthesis code (#17609) * [API Gateway] Fix trust domain for external peered services in synthesis code * Add changelog * backport ent changes to oss (#17614) * backport ent changes to oss * Update .changelog/_5669.txt Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> --------- Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> * Update intentions.mdx (#17619) Make behaviour of L7 intentions clearer * enterprise changelog update for audit (#17625) * Update list of Envoy versions (#17546) * [API Gateway] Fix rate limiting for API gateways (#17631) * [API Gateway] Fix rate limiting for API gateways * Add changelog * Fix failing unit tests * Fix operator usage tests for api package * sort some imports that are wonky between oss and ent (#17637) * PmTLS and tproxy improvements with failover and L7 traffic mgmt for k8s (#17624) * porting over changes from enterprise repo to oss * applied feedback on service mesh for k8s overview * fixed typo * removed ent-only build script file * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * Delete check-legacy-links-format.yml (#17647) * docs: Reference doc updates for permissive mTLS settings (#17371) * Reference doc updates for permissive mTLS settings * Document config entry filtering * Fix minor doc errors (double slashes in link url paths) --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add generic experiments configuration and use it to enable catalog v2 resources (#17604) * Add generic experiments configuration and use it to enable catalog v2 resources * Run formatting with -s as CI will validate that this has been done * api-gateway: stop adding all header filters to virtual host when generating xDS (#17644) * Add header filter to api-gateway xDS golden test * Stop adding all header filters to virtual host when generating xDS for api-gateway * Regenerate xDS golden file for api-gateway w/ header filter * fix: add agent info reporting log (#17654) * Add new Consul 1.16 docs (#17651) * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * 
Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * fix build errors --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Default `ProxyType` for builtin extensions (#17657) * Post 1.16.0-rc1 updates (#17663) - Update changelog to include new entries from release - Update submodule versions to latest published * Update service-defaults.mdx (#17656) * docs: Sameness Groups (#17628) * port from enterprise branch * Apply suggestions from code review Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> * Update website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx * next steps * Update website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Remove "BETA" marker from config entries (#17670) * CAPIgw for K8s installation updates for 1.16 (#17627) * trimmed CRD step and reqs from installation * updated tech specs * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * added upgrade instruction * removed tcp port req * described downtime and DT-less upgrades * applied additional review feedback --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * additional feedback on API gateway upgrades (#17677) * additional feedback * Update website/content/docs/api-gateway/upgrades.mdx Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> --------- Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * docs: JWT Authorization for intentions (#17643) * Initial page/nav creation * configuration entry reference page * Usage + fixes * service intentions page * usage * description * config entry updates * formatting fixes * Update website/content/docs/connect/config-entries/service-intentions.mdx Co-authored-by: Paul Glass <pglass@hashicorp.com> * service intentions review fixes * Overview page review fixes * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Paul Glass <pglass@hashicorp.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: minor fixes to JWT auth docs (#17680) * Fixes * service intentions fixes * Fix two WAL metrics in docs/agent/telemetry.mdx (#17593) * updated failover for k8s w-tproxy page title (#17683) * Add release notes 1.16 rc (#17665) * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * Add release notes for 1.16-rc * Add consul-e license utlization reporting * Update with rc absolute links * Update with rc absolute links * fix typo * Apply 
suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update to use callout component * address typo * docs: FIPS 140-2 Compliance (#17668) * Page + nav + formatting * link fix * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * link fix * Apply suggestions from code review Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * fix apigw install values file * fix typos in release notes --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * fix release notes links (#17687) * adding redirects for tproxy and envoy extensions (#17688) * adding redirects * Apply suggestions from code review * Fix FIPS copy (#17691) * fix release notes links * fix typos on fips docs * [NET-4107][Supportability] Log Level set to TRACE and duration set to 5m for consul-debug (#17596) * changed duration to 5 mins and log level to trace * documentation update * change log * ENT merge of ext-authz extension updates (#17684) * docs: Update default values for Envoy extension proxy types (#17676) * fix: stop peering delete routine on leader loss (#17483) * Refactor disco chain prioritize by locality structs (#17696) This includes prioritize by localities on disco chain targets rather than resolvers, allowing different targets within the same partition to have different policies. * agent: remove agent cache dependency from service mesh leaf certificate management (#17075) * agent: remove agent cache dependency from service mesh leaf certificate management This extracts the leaf cert management from within the agent cache. This code was produced by the following process: 1. All tests in agent/cache, agent/cache-types, agent/auto-config, agent/consul/servercert were run at each stage. - The tests in agent matching .*Leaf were run at each stage. - The tests in agent/leafcert were run at each stage after they existed. 2. 
The former leaf cert Fetch implementation was extracted into a new package behind a "fake RPC" endpoint to make it look almost like all other cache type internals. 3. The old cache type was shimmed to use the fake RPC endpoint and generally cleaned up. 4. I selectively duplicated all of Get/Notify/NotifyCallback/Prepopulate from the agent/cache.Cache implementation over into the new package. This was renamed as leafcert.Manager. - Code that was irrelevant to the leaf cert type was deleted (inlining blocking=true, refresh=false) 5. Everything that used the leaf cert cache type (including proxycfg stuff) was shifted to use the leafcert.Manager instead. 6. agent/cache-types tests were moved and gently replumbed to execute as-is against a leafcert.Manager. 7. Inspired by some of the locking changes from derek's branch I split the fat lock into N+1 locks. 8. The waiter chan struct{} was eventually replaced with a singleflight.Group around cache updates, which was likely the biggest net structural change. 9. The awkward two layers or logic produced as a byproduct of marrying the agent cache management code with the leaf cert type code was slowly coalesced and flattened to remove confusion. 10. The .*Leaf tests from the agent package were copied and made to work directly against a leafcert.Manager to increase direct coverage. I have done a best effort attempt to port the previous leaf-cert cache type's tests over in spirit, as well as to take the e2e-ish tests in the agent package with Leaf in the test name and copy those into the agent/leafcert package to get more direct coverage, rather than coverage tangled up in the agent logic. There is no net-new test coverage, just coverage that was pushed around from elsewhere. * [core]: Pin github action workflows (#17695) * docs: missing changelog for _5517 (#17706) * add enterprise notes for IP-based rate limits (#17711) * add enterprise notes for IP-based rate limits * Apply suggestions from code review Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * added bolded 'Enterprise' in list items. 
--------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * Update compatibility.mdx (#17713) * Remove extraneous version info for Config entries (#17716) * Update terminating-gateway.mdx * Update exported-services.mdx * Update mesh.mdx * fix: typo in link to section (#17527) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Bump Alpine to 3.18 (#17719) * Update Dockerfile * Create 17719.txt * NET-1825: New ACL token creation docs (#16465) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * [NET-3865] [Supportability] Additional Information in the output of 'consul operator raft list-peers' (#17582) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * OSS merge: Update error handling login when applying extensions (#17740) * Bump atlassian/gajira-transition from 3.0.0 to 3.0.1 (#17741) Bumps [atlassian/gajira-transition](https://github.com/atlassian/gajira-transition) from 3.0.0 to 3.0.1. - [Release notes](https://github.com/atlassian/gajira-transition/releases) - [Commits](https://github.com/atlassian/gajira-transition/compare/4749176faf14633954d72af7a44d7f2af01cc92b...38fc9cd61b03d6a53dd35fcccda172fe04b36de3) --- updated-dependencies: - dependency-name: atlassian/gajira-transition dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add truncation to body (#17723) * docs: Failover overview minor fix (#17743) * Incorrect symbol * Clarification * slight edit for clarity * docs - update Envoy and Dataplane compat matrix (#17752) * Update envoy.mdx added more detail around default versus other compatible versions * validate localities on agent configs and registration endpoints (#17712) * Updated docs added explanation. (#17751) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs * explanation added --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * Update index.mdx (#17749) * added redirects and updated links (#17764) * Add transparent proxy enhancements changelog (#17757) * docs - remove use of consul leave during upgrade instructions (#17758) * Fix issue with streaming service health watches. (#17775) Fix issue with streaming service health watches. This commit fixes an issue where the health streams were unaware of service export changes. 
Whenever an exported-services config entry is modified, it is effectively an ACL change. The bug would be triggered by the following situation: - no services are exported - an upstream watch to service X is spawned - the streaming backend filters out data for service X (due to lack of exports) - service X is finally exported In the situation above, the streaming backend does not trigger a refresh of its data. This means that any events that were supposed to have been received prior to the export are NOT backfilled, and the watches never see service X spawning. We currently have decided to not trigger a stream refresh in this situation due to the potential for a thundering herd effect (touching exports would cause a re-fetch of all watches for that partition, potentially). Therefore, a local blocking-query approach was added by this commit for agentless. It's also worth noting that the streaming subscription is currently bypassed most of the time with agentful, because proxycfg has a `req.Source.Node != ""` which prevents the `streamingEnabled` check from passing. This means that while agents should technically have this same issue, they don't experience it with mesh health watches. Note that this is a temporary fix that solves the issue for proxycfg, but not service-discovery use cases. * Property Override validation improvements (#17759) * Reject inbound Prop Override patch with Services Services filtering is only supported for outbound TrafficDirection patches. * Improve Prop Override unexpected type validation - Guard against additional invalid parent and target types - Add specific error handling for Any fields (unsupported) * Fixes (#17765) * Update license get explanation (#17782) This PR is to clarify what happens if the license get command is run on a follower if the leader hasn't been updated with a newer license. * Add Patch index to Prop Override validation errors (#17777) When a patch is found invalid, include its index for easier debugging when multiple patches are provided. * Stop referenced jwt providers from being deleted (#17755) * Stop referenced jwt providers from being deleted * Implement a Catalog Controllers Lifecycle Integration Test (#17435) * Implement a Catalog Controllers Lifecycle Integration Test * Prevent triggering the race detector. This allows defining some variables for protobuf constants and using those in comparisons. Without that, something internal in the fmt package ended up looking at the protobuf message size cache and triggering the race detector. * HCP Add node id/name to config (#17750) * Catalog V2 Container Based Integration Test (#17674) * Implement the Catalog V2 controller integration container tests This now allows the container tests to import things from the root module. However for now we want to be very restrictive about which packages we allow importing. * Add an upgrade test for the new catalog Currently this should be dormant and not executed. However its put in place to detect breaking changes in the future and show an example of how to do an upgrade test with integration tests structured like catalog v2. * Make testutil.Retry capable of performing cleanup operations These cleanup operations are executed after each retry attempt. * Move TestContext to taking an interface instead of a concrete testing.T This allows this to be used on a retry.R or generally anything that meets the interface. 
* Move to using TestContext instead of background contexts Also this forces all test methods to implement the Cleanup method now instead of that being an optional interface. Co-authored-by: Daniel Upton <daniel@floppy.co> * Fix Docs for Trails Leader By (#17763) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs * explanation added * fix doc * fix docs --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * Improve Prop Override docs examples (#17799) - Provide more realistics examples for setting properties not already supported natively by Consul - Remove superfluous commas from HCL, correct target service name, and fix service defaults vs. proxy defaults in examples - Align existing integration test to updated docs * Test permissive mTLS filter chain not configured with tproxy disabled (#17747) * Add documentation for remote debugging of integration tests. (#17800) * Add documentation for remote debugging of integration tests. * add link from main docs page. * changes related to PR feedback * Clarify limitations of Prop Override extension (#17801) Explicitly document the limitations of the extension, particularly what kind of fields it is capable of modifying. * Fix formatting for webhook-certs Consul tutorial (#17810) * Fix formatting for webhook-certs Consul tutorial * Make a small grammar change to also pick up whitespace changes necessary for formatting --------- Co-authored-by: David Yu <dyu@hashicorp.com> * Add jwt-authn metrics to jwt-provider docs (#17816) * [NET-3095] add jwt-authn metrics docs * Change URLs for redirects from RC to default latest (#17822) * Set GOPRIVATE for all hashicorp repos in CI (#17817) Consistently set GOPRIVATE to include all hashicorp repos, s.t. private modules are successfully pulled in enterprise CI. * Make locality aware routing xDS changes (#17826) * Fixup consul-container/test/debugging.md (#17815) Add missing `-t` flag and fix minor typo. * fixes #17732 - AccessorID in request body should be optional when updating ACL token (#17739) * AccessorID in request body should be optional when updating ACL token * add a test case * fix test case * add changelog entry for PR #17739 * CA provider doc updates and Vault provider minor update (#17831) Update CA provider docs Clarify that providers can differ between primary and secondary datacenters Provide a comparison chart for consul vs vault CA providers Loosen Vault CA provider validation for RootPKIPath Update Vault CA provider documentation * ext-authz Envoy extension: support `localhost` as a valid target URI. (#17821) * CI Updates (#17834) * Ensure that git access to private repos uses the ELEVATED_GITHUB_TOKEN * Bump the runner size for the protobuf generation check This has failed previously when the runner process that communicates with GitHub gets starved causing the job to fail. 
* counter part of ent pr (#17618) * watch: support -filter for consul watch: checks, services, nodes, service (#17780) * watch: support -filter for watch checks * Add filter for watch nodes, services, and service - unit test added - Add changelog - update doc * Trigger OSS => ENT merge for all release branches (#17853) Previously, this only triggered for release/*.*.x branches; however, our release process involves cutting a release/1.16.0 branch, for example, at time of code freeze these days. Any PRs to that branch after code freeze today do not make their way to consul-enterprise. This will make behavior for a .0 branch consistent with current behavior for a .x branch. * Update service-mesh.mdx (#17845) Deleted two commas which looks quite like some leftovers. * Add docs for sameness groups with resolvers. (#17851) * docs: add note about path prefix matching behavior for HTTPRoute config (#17860) * Add note about path prefix matching behavior for HTTPRoute config * Update website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: update upgrade to consul-dataplane docs on k8s (#17852) * resource: add `AuthorizerContext` helper method (#17393) * resource: enforce consistent naming of resource types (#17611) For consistency, resource type names must follow these rules: - `Group` must be snake case, and in most cases a single word. - `GroupVersion` must be lowercase, start with a "v" and end with a number. - `Kind` must be pascal case. These were chosen because they map to our protobuf type naming conventions. * tooling: generate protoset file (#17364) Extends the `proto` make target to generate a protoset file for use with grpcurl etc. * Fix a bug that wrongly trims domains when there is an overlap with DC name (#17160) * Fix a bug that wrongly trims domains when there is an overlap with DC name Before this change, when DC name and domain/alt-domain overlap, the domain name incorrectly trimmed from the query. Example: Given: datacenter = dc-test, alt-domain = test.consul. Querying for "test-node.node.dc-test.consul" will faile, because the code was trimming "test.consul" instead of just ".consul" This change, fixes the issue by adding dot (.) 
before trimming * trimDomain: ensure domain trimmed without modyfing original domains * update changelog --------- Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * deps: aws-sdk-go v1.44.289 (#17876) Signed-off-by: Dan Bond <danbond@protonmail.com> * api-gateway: add operation cannot be fulfilled error to common errors (#17874) * add error message * Update website/content/docs/api-gateway/usage/errors.mdx Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * fix formating issues --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * api-gateway: add step to upgrade instructions for creating intentions (#17875) * Changelog - add 1.13.9, 1.14.8, and 1.15.4 (#17889) * docs: update config enable_debug (#17866) * update doc for config enable_debug * Update website/content/docs/agent/config/config-files.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update wording on WAN fed and intermediate_pki_path (#17850) * Allow service identity tokens the ability to read jwt-providers (#17893) * Allow service identity tokens the ability to read jwt-providers * more tests * service_prefix tests * Update docs (#17476) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add emit_tags_as_labels to envoy bootstrap config when using Consul Telemetry Collector (#17888) * Fix command from kg to kubectl get (#17903) * Create and update release notes for 1.16 and 1.2 (#17895) * update release notes for 1.16 and 1.2 * update latest consul core release * Propose new changes to APIgw upgrade instructions (#17693) * Propose new changes to APIgw upgrade instructions * fix build error * update callouts to render correctly * Add hideClipboard to log messages * Added clarification around consul k8s and crds * Add workflow to verify linux release packages (#17904) * adding docker files to verify linux packages. * add verifr-release-linux.yml * updating name * pass inputs directly into jobs * add other linux package platforms * remove on push * fix TARGETARCH on debian and ubuntu so it can check arm64 and amd64 * fixing amazon to use the continue line * add ubuntu i386 * fix comment lines * working * remove commented out workflow jobs * Apply suggestions from code review Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * update fedora and ubuntu to use latest tag --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * Reference hashicorp/consul instead of consul for Docker image (#17914) * Reference hashicorp/consul instead of consul for Docker image * Update Make targets that pull consul directly * Update Consul K8s Upgrade Doc Updates (#17921) Updating upgrade procedures to encompass expected errors during upgrade process from v1.13.x to v1.14.x. * Update sameness-group.mdx (#17915) * Update create-sameness-groups.mdx (#17927) * deps: coredns v1.10.1 (#17912) * Ensure RSA keys are at least 2048 bits in length (#17911) * Ensure RSA keys are at least 2048 bits in length * Add changelog * update key length check for FIPS compliance * Fix no new variables error and failing to return when error exists from validating * clean up code for better readability * actually return value * tlsutil: Fix check TLS configuration (#17481) * tlsutil: Fix check TLS configuration * Rewording docs. 
* Update website/content/docs/services/configuration/checks-configuration-reference.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Fix typos and add changelog entry. --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Deprecations for connect-native SDK and specific connect native APIs (#17937) * Update v1_16_x.mdx * Update connect native golang page --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Revert "Add workflow to verify linux release packages (#17904)" (#17942) This reverts commit 3368f14fab500ebe9f6aeab5631dd1d5f5a453e5. * Fixes Secondary ConnectCA update (#17846) This fixes a bug that was identified which resulted in subsequent ConnectCA configuration update not to persist in the cluster. * fixing typo in link to jwt-validations-with-intentions doc (#17955) * Fix streaming backend link (#17958) * Fix streaming backend link * Update health.mdx * Dynamically create jwks clusters for jwt-providers (#17944) * website: remove deprecated agent rpc docs (#17962) * Fix missing BalanceOutboundConnections in v2 catalog. (#17964) * feature - [NET - 4005] - [Supportability] Reloadable Configuration - enable_debug (#17565) * # This is a combination of 9 commits. # This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same * # This is a combination of 12 commits. 
# This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same # This is the commit message #10: made enable debug atomic bool # This is the commit message #11: fix lint # This is the commit message #12: fix test true enable debug * parent 10f500e895d92cc3691ade7b74a33db755d22039 author absolutelightning <ashesh.vidyut@hashicorp.com> 1687352587 +0530 committer absolutelightning <ashesh.vidyut@hashicorp.com> 1687352592 +0530 init without tests change log fix tests fix tests added tests change log breaking change removed breaking change fix test keeping the test behaviour same made enable debug atomic bool fix lint fix test true enable debug using enable debug in agent as atomic bool test fixes fix tests fix tests added update on correct locaiton fix tests fix reloadable config enable debug fix tests fix init and acl 403 * revert commit * Fix formatting codeblocks on APIgw docs (#17970) * fix formatting codeblocks * remove unnecessary indents * Remove POC code (#17974) * update doc (#17910) * update doc * update link * Remove duplicate and unused newDecodeConfigEntry func (#17979) * docs: samenessGroup YAML examples (#17984) * configuration entry syntax * Example config * Add changelog entry for 1.16.0 (#17987) * Fix typo (#17198) servcies => services * Expose JWKS cluster config through JWTProviderConfigEntry (#17978) * Expose JWKS cluster config through JWTProviderConfigEntry * fix typos, rename trustedCa to trustedCA * Integration test for ext-authz Envoy extension (#17980) * Fix incorrect protocol for transparent proxy upstreams. (#17894) This PR fixes a bug that was introduced in: https://github.com/hashicorp/consul/pull/16021 A user setting a protocol in proxy-defaults would cause tproxy implicit upstreams to not honor the upstream service's protocol set in its `ServiceDefaults.Protocol` field, and would instead always use the proxy-defaults value. Due to the fact that upstreams configured with "tcp" can successfully contact upstream "http" services, this issue was not recognized until recently (a proxy-defaults with "tcp" and a listening service with "http" would make successful requests, but not the opposite). As a temporary work-around, users experiencing this issue can explicitly set the protocol on the `ServiceDefaults.UpstreamConfig.Overrides`, which should take precedence. The fix in this PR removes the proxy-defaults protocol from the wildcard upstream that tproxy uses to configure implicit upstreams. When the protocol was included, it would always overwrite the value during discovery chain compilation, which was not correct. The discovery chain compiler also consumes proxy defaults to determine the protocol, so simply excluding it from the wildcard upstream config map resolves the issue. 
* feat: include nodes count in operator usage endpoint and cli command (#17939) * feat: update operator usage api endpoint to include nodes count * feat: update operator usange cli command to includes nodes count * [OSS] Improve Gateway Test Coverage of Catalog Health (#18011) * fix(cli): remove failing check from 'connect envoy' registration for api gateway * test(integration): add tests to check catalog statsus of gateways on startup * remove extra sleep comment * Update test/integration/consul-container/libs/assert/service.go * changelog * Fixes Traffic rate limitting docs (#17997) * Fix removed service-to-service peering links (#17221) * docs: fix removed service-to-service peering links * docs: extend peering-via-mesh-gateways intro (thanks @trujillo-adam) --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Sameness "beta" warning (#18017) * Warning updates * .x * updated typo in tab heading (#18022) * updated typo in tab heading * updated tab group typo, too * Document that DNS lookups can target cluster peers (#17990) Static DNS lookups, in addition to explicitly targeting a datacenter, can target a cluster peer. This was added in 95dc0c7b301b70a6b955a8b7c9737c9b86f03df6 but didn't make the documentation. The driving function for the change is `parseLocality` here: https://github.com/hashicorp/consul/blob/0b1299c28d8127129d61310ee4280055298438e0/agent/dns_oss.go#L25 The biggest change in this is to adjust the standard lookup syntax to tie `.<datacenter>` to `.dc` as required-together, and to append in the similar `.<cluster-peer>.peer` optional argument, both to A record and SRV record lookups. Co-authored-by: David Yu <dyu@hashicorp.com> * Add first integration test for jwt auth with intention (#18005) * fix stand-in text for name field (#18030) * removed sameness conf entry from failover nav (#18033) * docs - add service sync annotations and k8s service weight annotation (#18032) * Docs for https://github.com/hashicorp/consul-k8s/pull/2293 * remove versions for enterprise features since they are old --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * docs - add jobs use case for service mesh k8s (#18037) * docs - add jobs use case for service mesh k8s * add code blocks * address feedback (#18045) * Add verify server hostname to tls default (#17155) * [OSS] Fix initial_fetch_timeout to wait for all xDS resources (#18024) * fix(connect): set initial_fetch_time to wait indefinitely * changelog * PR feedback 1 * ui: fix typos for peer service imports (#17999) * test: fix FIPS inline cert test message (#18076) * Fix a couple typos in Agent Telemetry Metrics docs (#18080) * Fix metrics docs * Add changelog Signed-off-by: josh <josh.timmons@hashicorp.com> --------- Signed-off-by: josh <josh.timmons@hashicorp.com> * docs updates - cluster peering and virtual services (#18069) * Update route-to-virtual-services.mdx * Update establish-peering.mdx * Update service-mesh-compare.mdx (#17279) grammar change * Update helm docs on main (#18085) * ci: use gotestsum v1.10.1 [NET-4042] (#18088) * Docs: Update proxy lifecycle annotations and consul-dataplane flags (#18075) * Update proxy lifecycle annotations and consul-dataplane flags * Pass configured role name to Vault for AWS auth in Connect CA (#17885) * Docs for dataplane upgrade on k8s (#18051) * Docs for dataplane upgrade on k8s --------- Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs - 
update upgrade index page to not recommend consul leave. (#18100) * Displays Consul version of each nodes in UI nodes section (#17754) * update UINodes and UINodeInfo response with consul-version info added as NodeMeta, fetched from serf members * update test cases TestUINodes, TestUINodeInfo * added nil check for map * add consul-version in local agent node metadata * get consul version from serf member and add this as node meta in catalog register request * updated ui mock response to include consul versions as node meta * updated ui trans and added version as query param to node list route * updates in ui templates to display consul version with filter and sorts * updates in ui - model class, serializers,comparators,predicates for consul version feature * added change log for Consul Version Feature * updated to get version from consul service, if for some reason not available from serf * updated changelog text * updated dependent testcases * multiselection version filter * Update agent/consul/state/catalog.go comments updated Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --------- Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * api gw 1.16 updates (#18081) * api gw 1.16 updates * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * update CodeBlockConfig filename * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * remove non-standard intentions page * Update website/content/docs/api-gateway/configuration/index.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [NET-4103] ci: build s390x (#18067) * ci: build s390x * ci: test s390x * ci: dev build s390x * no GOOS * ent only * build: publish s390x * fix syntax error * fix syntax error again * fix syntax error again x2 * test branch * Move s390x conditionals to step level * remove test branch --------- Co-authored-by: emilymianeil <eneil@hashicorp.com> * :ermahgerd "Sevice Mesh" -> "Service Mesh" (#18116) Just a typo in the docs. * Split pbmesh.UpstreamsConfiguration as a resource out of pbmesh.Upstreams (#17991) Configuration that previously was inlined into the Upstreams resource applies to both explicit and implicit upstreams and so it makes sense to split it out into its own resource. It also has other minor changes: - Renames `proxy.proto` proxy_configuration.proto` - Changes the type of `Upstream.destination_ref` from `pbresource.ID` to `pbresource.Reference` - Adds comments to fields that didn't have them * [NET-4895] ci - api tests and consul container tests error because of dependency bugs with go 1.20.6. Pin go to 1.20.5. (#18124) ### Description The following jobs started failing when go 1.20.6 was released: - `go-test-api-1-19` - `go-test-api-1-20` - `compatibility-integration-tests` - `upgrade-integration-tests` `compatibility-integration-tests` and `compatibility-integration-tests` to this testcontainers issue: https://github.com/testcontainers/testcontainers-go/issues/1359. This issue calls for testcontainers to release a new version when one of their dependencies is fixed. 
When that is done, we will unpin the go versions in `compatibility-integration-tests` and `compatibility-integration-tests`. ### Testing & Reproduction steps See these jobs broken in CI and then see them work with this PR. --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * Add ingress gateway deprecation notices to docs (#18102) ### Description This adds notices, that ingress gateway is deprecated, to several places in the product docs where ingress gateway is the topic. ### Testing & Reproduction steps Tested with a local copy of the website. ### Links Deprecation of ingress gateway was announced in the Release Notes for Consul 1.16 and Consul-K8s 1.2. See: [https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated ) [https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated) ### PR Checklist * [N/A] updated test coverage * [X] external facing docs updated * [X] appropriate backport labels added * [X] not a security concern --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add docs for jwt cluster configuration (#18004) ### Description <!-- Please describe why you're making this change, in plain English. --> - Add jwt-provider docs for jwks cluster configuration. The configuration was added here: https://github.com/hashicorp/consul/pull/17978 * Docs: fix unmatched bracket for health checks page (#18134) * NET-4657/add resource service client (#18053) ### Description <!-- Please describe why you're making this change, in plain English. --> Dan had already started on this [task](https://github.com/hashicorp/consul/pull/17849) which is needed to start building the HTTP APIs. This just needed some cleanup to get it ready for review. Overview: - Rename `internalResourceServiceClient` to `insecureResourceServiceClient` for name consistency - Configure a `secureResourceServiceClient` with auth enabled ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Fix bug with Vault CA provider (#18112) Updating RootPKIPath but not IntermediatePKIPath would not update leaf signing certs with the new root. Unsure if this happens in practice but manual testing showed it is a bug that would break mesh and agent connections once the old root is pruned. * [NET-4897] net/http host header is now verified and request.host that contains socked now error (#18129) ### Description This is related to https://github.com/hashicorp/consul/pull/18124 where we pinned the go versions in CI to 1.20.5 and 1.19.10. go 1.20.6 and 1.19.11 now validate request host headers for validity, including the hostname cannot be prefixed with slashes. For local communications (npipe://, unix://), the hostname is not used, but we need valid and meaningful hostname. Prior versions go Go would clean the host header, and strip slashes in the process, but go1.20.6 and go1.19.11 no longer do, and reject the host header. Around the community we are seeing that others are intercepting the req.host and if it starts with a slash or ends with .sock, they changing the host to localhost or another dummy value. 
[client: define a "dummy" hostname to use for local connections by thaJeztah · Pull Request #45942 · moby/moby](https://github.com/moby/moby/pull/45942) ### Testing & Reproduction steps Check CI tests. ### Links * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * add a conditional around setting LANFilter.AllSegments to make sure it is valid (#18139) ### Description This is to correct a code problem because this assumes all segments, but when you get to Enterprise, you can be in partition that is not the default partition, in which case specifying all segments does not validate and fails. This is to correct the setting of this filter with `AllSegments` to `true` to only occur when in the the `default` partition. ### Testing & Reproduction steps <!-- * In the case of bugs, describe how to replicate * If any manual tests were done, document the steps and the conditions to replicate * Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding --> ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> ### PR Checklist * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * chore: bump upgrade integrations tests to 1.15, 116 [NET-4743] (#18130) * re org resource type registry (#18133) * fix: update delegateMock used in ENT (#18149) ### Description <!-- Please describe why you're making this change, in plain English. --> The mock is used in `http_ent_test` file which caused lint failures. For OSS->ENT parity adding the same change here. ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> Identified in OSS->ENT [merge PR](https://github.com/hashicorp/consul-enterprise/pull/6328) ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062) ### Description <!-- Please describe why you're making this change, in plain English. --> - Currently the jwt-auth filter doesn't take into account the service identity when validating jwt-auth, it only takes into account the path and jwt provider during validation. This causes issues when multiple source intentions restrict access to an endpoint with different JWT providers. - To fix these issues, rather than use the JWT auth filter for validation, we use it in metadata mode and allow it to forward the successful validated JWT token payload to the RBAC filter which will make the decisions. This PR ensures requests with and without JWT tokens successfully go through the jwt-authn filter. The filter however only forwards the data for successful/valid tokens. On the RBAC filter level, we check the payload for claims and token issuer + existing rbac rules. 
### Testing & Reproduction steps - This test covers multi-level JWT requirements (requirements at the top level and at the permissions level). It also assumes you have envoy running, you have a redis and a sidecar proxy service registered, and have a way to generate jwks with jwt. I mostly use: https://www.scottbrady91.com/tools/jwt for this. - first write your proxy defaults

```
Kind = "proxy-defaults"
name = "global"
config {
  protocol = "http"
}
```

- Create two providers

```
Kind = "jwt-provider"
Name = "auth0"
Issuer = "https://ronald.local"
JSONWebKeySet = {
  Local = {
    JWKS = "eyJrZXlzIjog....."
  }
}
```

```
Kind = "jwt-provider"
Name = "okta"
Issuer = "https://ronald.local"
JSONWebKeySet = {
  Local = {
    JWKS = "eyJrZXlzIjogW3...."
  }
}
```

- add a service intention

```
Kind = "service-intentions"
Name = "redis"
JWT = {
  Providers = [
    { Name = "okta" },
  ]
}
Sources = [
  {
    Name = "*"
    Permissions = [
      {
        Action = "allow"
        HTTP = { PathPrefix = "/workspace" }
        JWT = {
          Providers = [
            {
              Name = "okta"
              VerifyClaims = [
                { Path = ["aud"] Value = "my_client_app" },
                { Path = ["sub"] Value = "5be86359073c434bad2da3932222dabe" }
              ]
            },
          ]
        }
      },
      {
        Action = "allow"
        HTTP = { PathPrefix = "/" }
        JWT = {
          Providers = [
            { Name = "auth0" },
          ]
        }
      }
    ]
  }
]
```

- generate 3 jwt tokens: 1 from the auth0 jwks, 1 from the okta jwks with different claims than `/workspace` expects, and 1 with correct claims - connect to your envoy (change service and address as needed) to view logs and potential errors. You can add: `-- --log-level debug` to see what data is being forwarded

```
consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502
```

- Make the following requests:

```
curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied

curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
RBAC filter denied

curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v
Successful request
```

### TODO * [x] Update test coverage * [ ] update integration tests (follow-up PR) * [x] appropriate backport labels added * Support Consul Connect Envoy Command on Windows (#17694) ### Description Add support for the consul connect envoy command on windows. This PR addresses the review comments of PR https://github.com/hashicorp/consul/pull/15114 ### Testing * Built consul.exe from this branch on windows and hosted it here - [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Updated the [tutorial](https://developer.hashicorp.com/consul/tutorials/developer-mesh/consul-windows-workloads) and changed the `consul_url.default` value to [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Followed the steps in the tutorial and verified that everything is working as described.
### PR Checklist * [x] updated test coverage * [ ] external facing docs updated * [x] appropriate backport labels added * [x] not a security concern --------- Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo <joseignaciolorenzo85@gmail.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * Change docs to say 168h instead of 7d for server_rejoin_age_max (#18154) ### Description Addresses https://github.com/hashicorp/consul/pull/17171#issuecomment-1636930705 * [OSS] test: improve xDS listener code coverage (#18138) test: improve xDS listener code coverage * Re-order expected/actual for assertContainerState in consul container tests (#18157) Re-order expected/actual, consul container tests * group and document make file (#17943) * group and document make file * Add `testing/deployer` (née `consul-topology`) [NET-4610] (#17823) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> * [NET-4792] Add integration tests for jwt-auth (#18169) * Add FIPS reference to consul enterprise docs (#18028) * Add FIPS reference to consul enterprise docs * Update website/content/docs/enterprise/index.mdx Co-authored-by: David Yu <dyu@hashicorp.com> * remove support for ecs client (fips) --------- Co-authored-by: David Yu <dyu@hashicorp.com> * add peering_commontopo tests [NET-3700] (#17951) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> Co-authored-by: NiniOak <anita.akaeze@hashicorp.com> * docs - remove Sentinel from enterprise features list (#18176) * Update index.mdx * Update kv.mdx * Update docs-nav-data.json * delete sentinel.mdx * Update redirects.js --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * [NET-4865] Bump golang.org/x/net to 0.12.0 (#18186) Bump golang.org/x/net to 0.12.0 While not necessary to directly address CVE-2023-29406 (which should be handled by using a patched version of Go when building), an accompanying change to HTTP/2 error handling does impact agent code. See https://go-review.googlesource.com/c/net/+/506995 for the HTTP/2 change. Bump this dependency across our submodules as well for the sake of potential indirect consumers of `x/net/http`. * Call resource mutate hook before validate hook (NET-4907) (#18178) * [NET-4865] security: Update Go version to 1.20.6 (#18190) Update Go version to 1.20.6 This resolves [CVE-2023-29406](https://nvd.nist.gov/vuln/detail/CVE-2023-29406) for uses of the `net/http` standard library. Note that until the follow-up to #18124 is done, the version of Go used in those impacted tests will need to remain on 1.20.5. * Improve XDS test coverage: JWT auth edition (#18183) * Improve XDS test coverage: JWT auth edition more tests * test: xds coverage for jwt listeners --------- Co-authored-by: DanStough <dan.stough@hashicorp.com> * update readme.md (#18191) update readme.md * Update submodules to latest following 1.16.0 (#18197) Align all our internal use of submodules on the latest versions. * SEC-090: Automated trusted workflow pinning (2023-07-18) (#18174) Result of tsccr-helper -log-level=info -pin-all-workflows .
Co-authored-by: hashicorp-tsccr[bot] <hashicorp-tsccr[bot]@users.noreply.github.com> * Fix Backport Assistant PR commenting (#18200) * Fix Backport Assistant failure PR commenting For general comments on a PR, it looks like you have to use the `/issue` endpoint rather than `/pulls`, which requires commit/other review-specific target details. This matches the endpoint used in `backport-reminder.yml`. * Remove Backport Reminder workflow This is noisy (even when adding multiple labels, individual comments per label are generated), and likely no longer needed: we haven't had this work in a long time due to an expired GH token, and we now have better automation for backport PR assignment. * resource: Pass resource to Write ACL hook instead of just resource Id [NET-4908] (#18192) * Explicitly enable WebSocket upgrades (#18150) This PR explicitly enables WebSocket upgrades in Envoy's UpgradeConfig for all proxy types. (API Gateway, Ingress, and Sidecar.) Fixes #8283 * docs: fix the description of client rpc (#18206) * NET-4804: Add dashboard for monitoring consul-k8s (#18208) * [OSS] Improve xDS Code Coverage - Clusters (#18165) test: improve xDS cluster code coverage * NET-4222 take config file consul container (#18218) Net 4222 take config file consul container * Envoy Integration Test Windows (#18007) * [CONSUL-395] Update check_hostport and Usage (#40) * [CONSUL-397] Copy envoy binary from Image (#41) * [CONSUL-382] Support openssl in unique test dockerfile (#43) * [CONSUL-405] Add bats to single container (#44) * [CONSUL-414] Run Prometheus Test Cases and Validate Changes (#46) * [CONSUL-410] Run Jaeger in Single container (#45) * [CONSUL-412] Run test-sds-server in single container (#48) * [CONSUL-408] Clean containers (#47) * [CONSUL-384] Rebase and sync fork (#50) * [CONSUL-415] Create Scenarios Troubleshooting Docs (#49) * [CONSUL-417] Update Docs Single Container (#51) * [CONSUL-428] Add Socat to single container (#54) * [CONSUL-424] Replace pkill in kill_envoy function (#52) * [CONSUL-434] Modify Docker run functions in Helper script (#53) * [CONSUL-435] Replace docker run in set_ttl_check_state & wait_for_agent_service_register functions (#55) * [CONSUL-438] Add netcat (nc) in the Single container Dockerfile (#56) * [CONSUL-429] Replace Docker run with Docker exec (#57) * [CONSUL-436] Curl timeout and run tests (#58) * [CONSUL-443] Create dogstatsd Function (#59) * [CONSUL-431] Update Docs Netcat (#60) * [CONSUL-439] Parse nc Command in function (#61) * [CONSUL-463] Review curl Exec and get_ca_root Func (#63) * [CONSUL-453] Docker hostname in Helper functions (#64) * [CONSUL-461] Test wipe volumes without extra cont (#66) * [CONSUL-454] Check ports in the Server and Agent containers (#65) * [CONSUL-441] Update windows dockerfile with version (#62) * [CONSUL-466] Review case-grpc Failing Test (#67) * [CONSUL-494] Review case-cfg-resolver-svc-failover (#68) * [CONSUL-496] Replace docker_wget & docker_curl (#69) * [CONSUL-499] Cleanup Scripts - Remove nanoserver (#70) * [CONSUL-500] Update Troubleshooting Docs (#72) * [CONSUL-502] Pull & Tag Envoy Windows Image (#73) * [CONSUL-504] Replace docker run in docker_consul (#76) * [CONSUL-505] Change admin_bind * [CONSUL-399] Update envoy to 1.23.1 (#78) * [CONSUL-510] Support case-wanfed-gw on Windows (#79) * [CONSUL-506] Update troubleshooting Documentation (#80) * [CONSUL-512] Review debug_dump_volumes Function (#81) * [CONSUL-514] Add zipkin to Docker Image (#82) * [CONSUL-515] Update Documentation (#83) * [CONSUL-529] Support 
case-consul-exec (#86) * [CONSUL-530] Update Documentation (#87) * [CONSUL-530] Update default consul version 1.13.3 * [CONSUL-539] Cleanup (#91) * [CONSUL-546] Scripts Clean-up (#92) * [CONSUL-491] Support admin_access_log_path value for Windows (#71) * [CONSUL-519] Implement mkfifo Alternative (#84) * [CONSUL-542] Create OS Specific Files for Envoy Package (#88) * [CONSUL-543] Create exec_supported.go (#89) * [CONSUL-544] Test and Build Changes (#90) * Implement os.DevNull * using mmap instead of disk files * fix import in exec-unix * fix nmap open too many arguemtn * go fmt on file * changelog file * fix go mod * Update .changelog/17694.txt Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * different mmap library * fix bootstrap json * some fixes * chocolatey version fix and image fix * using different library * fix Map funciton call * fix mmap call * fix tcp dump * fix tcp dump * windows tcp dump * Fix docker run * fix tests * fix go mod * fix version 16.0 * fix version * fix version dev * sleep to debug * fix sleep * fix permission issue * fix permission issue * fix permission issue * fix command * fix command * fix funciton * fix assert config entry status command not found * fix command not found assert_cert_has_cn * fix command not found assert_upstream_missing * fix command not found assert_upstream_missing_once * fix command not found get_upstream_endpoint * fix command not found get_envoy_public_listener_once * fix command not found * fix test cases * windows integration test workflow github * made code similar to unix using npipe * fix go.mod * fix dialing of npipe * dont wait * check size of written json * fix undefined n * running * fix dep * fix syntax error * fix workflow file * windows runner * fix runner * fix from json * fix runs on * merge connect envoy * fix cin path * build * fix file name * fix file name * fix dev build * remove unwanted code * fix upload * fix bin name * fix path * checkout current branch * fix path * fix tests * fix shell bash for windows sh files * fix permission of run-test.sh * removed docker dev * added shell bash for tests * fix tag * fix win=true * fix cd * added dev * fix variable undefined * removed failing tests * fix tcp dump image * fix curl * fix curl * tcp dump path * fix tcpdump path * fix curl * fix curl install * stop removing intermediate containers * fix tcpdump docker image * revert -rm * --rm=false * makeing docker image before * fix tcpdump * removed case consul exec * removed terminating gateway simple * comment case wasm * removed data dog * comment out upload coverage * uncomment case-consul-exec * comment case consul exec * if always * logs * using consul 1.17.0 * fix quotes * revert quotes * redirect to dev null * Revert version * revert consul connect * fix version * removed envoy connect * not using function * change log * docker logs * fix logs * restructure bad authz * rmeoved dev null * output * fix file descriptor * fix cacert * fix cacert * fix ca cert * cacert does not work in windows curl * fix func * removed docker logs * added sleep * fix tls * commented case-consul-exec * removed echo * retry docker consul * fix upload bin * uncomment consul exec * copying consul.exe to docker image * copy fix * fix paths * fix path * github workspace path * latest version * Revert "latest version" This reverts commit 5a7d7b82d9e7553bcb01b02557ec8969f9deba1d. 
* commented consul exec * added ssl revoke best effort * revert best effort * removed unused files * rename var name and change dir * windows runner * permission * needs setup fix * swtich to github runner * fix file path * fix path * fix path * fix path * fix path * fix path * fix build paths * fix tag * nightly runs * added matrix in github workflow, renamed files * fix job * fix matrix * removed brackes * from json * without using job matrix * fix quotes * revert job matrix * fix workflow * fix comment * added comment * nightly runs * removed datadog ci as it is already measured in linux one * running test * Revert "running test" This reverts commit 7013d15a23732179d18ec5d17336e16b26fab5d4. * pr comment fixes * running test now * running subset of test * running subset of test * job matrix * shell bash * removed bash shell * linux machine for job matrix * fix output * added cat to debug * using ubuntu latest * fix job matrix * fix win true * fix go test * revert job matrix --------- Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: joselo85 <joseignaciolorenzo85@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * fix typos and update ecs compat table (#18215) * fix typos and update ecs compat table * real info for the ecs compat matrix table * Update website/content/docs/ecs/compatibility.mdx Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * [OSS] proxystate: add proxystate protos (#18216) * proxystate: add proxystate protos to pbmesh and resolve imports and conflicts between message names * ci: don't verify s390x (#18224) * [CC-5718] Remove HCP token requirement during bootstrap (#18140) * [CC-5718] Remove HCP token requirement during bootstrap * Re-add error for loading HCP management token * Remove old comment * Add changelog entry * Remove extra validation line * Apply suggestions from code review Co-authored-by: lornasong <lornasong@users.noreply.github.com> --------- Co-authored-by: lornasong <lornasong@users.noreply.github.com> * [NET-4122] Doc guidance for federation with externalServers (#18207) Doc guidance for federation with externalServers Add guidance for proper configuration when joining to a secondary cluster using WAN fed with external servers also enabled. Also clarify federation requirements and fix formatting for an unrelated value. Update both the Helm chart reference (synced from `consul-k8s`, see hashicorp/consul-k8s#2583) and the docs on using `externalServers`. 
* [OSS] Improve xDS Code Coverage - Endpoints and Misc (#18222) test: improve xDS endpoints code coverage * Clarify license reporting timing and GDPR compliance (#18237) Add Alicia's edits to clarify log timing and other details * Fix Github Workflow File (#18241)
* Fix tests --------- Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: joselo85 <joseignaciolorenzo85@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236) * Align build arch matrix with enterprise (#18235) Ensure that OSS remains in sync w/ Enterprise by aligning the format of arch matrix args for various build jobs. * Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes" (#18248) Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236)" This reverts commit a11dba710e6ce6f172c0fa6c9b61567cc1efffc8. * resource: Add scope to resource type registration [NET-4976] (#18214) Enables querying a resource type's registration to determine if a resource is cluster, partition, or partition and namespace scoped. * Fix some inconsistencies in jwt docs (#18234) * NET-1825: More new ACL token creation docs (#18063) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [CC-5719] Add support for builtin global-read-only policy * Add changelog * Add read-only to docs * Fix some minor issues. * Change from ReplaceAll to Sprintf * Change IsValidPolicy name to return an error instead of bool * Fix PolicyList test * Fix other tests * Apply suggestions from code review Co-authored-by: Paul Glass <pglass@hashicorp.com> * Fix state store test for policy list. * Fix naming issues * Update acl/validation.go Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * backport of commit d63fa5481dc02c6faae7cc2647b4073b3286af1d * backport of commit 3d099a6ed8ed10b6dc464c466cb1668914db8f08 --------- Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: Dan Bond <danbond@protonmail.com> Signed-off-by: josh <josh.timmons@hashicorp.com> Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> Co-authored-by: Ronald <roncodingenthusiast@users.noreply.github.com> Co-authored-by: Eric Haberkorn <erichaberkorn@gmail.com> Co-authored-by: Andrew Stucki <andrew.stucki@hashicorp.com> Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com> Co-authored-by: R.B.
Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: Bryce Kalow <bkalow@hashicorp.com> Co-authored-by: Paul Glass <pglass@hashicorp.com> Co-authored-by: Matt Keeler <mkeeler@users.noreply.github.com> Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> Co-authored-by: Poonam Jadhav <poonam.jadhav@hashicorp.com> Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> Co-authored-by: Hariram Sankaran <56744845+ramramhariram@users.noreply.github.com> Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> Co-authored-by: Thomas Eckert <teckert@hashicorp.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Co-authored-by: Joshua Timmons <josh.timmons@hashicorp.com> Co-authored-by: Ashesh Vidyut <134911583+absolutelightning@users.noreply.github.com> Co-authored-by: Dan Stough <dan.stough@hashicorp.com> Co-authored-by: Curt Bushko <cbushko@gmail.com> Co-authored-by: Tobias Birkefeld <t@craxs.de> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Co-authored-by: Semir Patel <semir.patel@hashicorp.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chappie <6537530+chapmanc@users.noreply.github.com> Co-authored-by: Derek Menteer <105233703+hashi-derek@users.noreply.github.com> Co-authored-by: John Murret <john.murret@hashicorp.com> Co-authored-by: Mark Campbell-Vincent <mnmvincent@gmail.com> Co-authored-by: Daniel Upton <daniel@floppy.co> Co-authored-by: Steven Zamborsky <97125550+stevenzamborsky@users.noreply.github.com> Co-authored-by: George Bolo <george.bolo@gmail.com> Co-authored-by: Chris S. Kim <ckim@hashicorp.com> Co-authored-by: wangxinyi7 <121973291+wangxinyi7@users.noreply.github.com> Co-authored-by: cskh <hui.kang@hashicorp.com> Co-authored-by: V. K <cn007b@gmail.com> Co-authored-by: Iryna Shustava <ishustava@users.noreply.github.com> Co-authored-by: Alex Simenduev <shamil.si@gmail.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> Co-authored-by: Dan Bond <danbond@protonmail.com> Co-authored-by: sarahalsmiller <100602640+sarahalsmiller@users.noreply.github.com> Co-authored-by: Gerard Nguyen <gerard@hashicorp.com> Co-authored-by: mr-miles <miles.waller@gmail.com> Co-authored-by: natemollica-dev <57850649+natemollica-nm@users.noreply.github.com> Co-authored-by: John Maguire <john.maguire@hashicorp.com> Co-authored-by: Samantha <hello@entropy.cat> Co-authored-by: Ranjandas <thejranjan@gmail.com> Co-authored-by: Evan Phoenix <evan@phx.io> Co-authored-by: Michael Hofer <karras@users.noreply.github.com> Co-authored-by: J.C. 
Jones <james.jc.jones@gmail.com> Co-authored-by: Fulvio <fulviodenza823@gmail.com> Co-authored-by: Krastin Krastev <krastin@hashicorp.com> Co-authored-by: david3a <49253132+david3a@users.noreply.github.com> Co-authored-by: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Co-authored-by: Tom Davies <tom@t-davies.com> Co-authored-by: Vijay <vijayraghav22@gmail.com> Co-authored-by: Eddie Rowe <74205376+eddie-rowe@users.noreply.github.com> Co-authored-by: emilymianeil <eneil@hashicorp.com> Co-authored-by: nv-hashi <80716011+nv-hashi@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo <joseignaciolorenzo85@gmail.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> Co-authored-by: NiniOak <anita.akaeze@hashicorp.com> Co-authored-by: hashicorp-tsccr[bot] <129506189+hashicorp-tsccr[bot]@users.noreply.github.com> Co-authored-by: hashicorp-tsccr[bot] <hashicorp-tsccr[bot]@users.noreply.github.com> Co-authored-by: Blake Covarrubias <blake@covarrubi.as> Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com> Co-authored-by: Jeremy Jacobson <jjacobson93@users.noreply.github.com> Co-authored-by: lornasong <lornasong@users.noreply.github.com> Co-authored-by: Judith Malnick <judith@hashicorp.com> Co-authored-by: Jeremy Jacobson <jeremy.jacobson@hashicorp.com>
2023-08-01 17:37:13 +00:00
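For context on the guard in the code below: builtin policies ship with fixed, well-known IDs, so the write path can detect them with a plain map lookup against `structs.ACLBuiltinPolicies`. A minimal sketch of that shape (the IDs and trimmed fields are illustrative assumptions, not the exact Consul definitions):

```
// Sketch only: a trimmed ACLPolicy and an illustrative builtin-policy table.
type ACLPolicy struct {
	ID          string
	Name        string
	Datacenters []string // nil/empty means the policy is valid in all datacenters
}

// Builtin policies are keyed by fixed IDs (values shown are assumptions).
var ACLBuiltinPolicies = map[string]ACLPolicy{
	"00000000-0000-0000-0000-000000000001": {Name: "global-management"},
	"00000000-0000-0000-0000-000000000002": {Name: "global-read-only"},
}
```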
if builtinPolicy, ok := structs.ACLBuiltinPolicies[policy.ID]; ok {
2018-10-19 16:04:07 +00:00
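// Builtin policies are valid in all datacenters by design, so a write that
// tries to scope one to specific datacenters is rejected.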
if policy.Datacenters != nil || len(policy.Datacenters) > 0 {
Backport of [CC-5719] Add support for builtin global-read-only policy into release/1.16.x (#18345) * [OSS] Post Consul 1.16 updates (#17606) * chore: update dev build to 1.17 * chore(ci): add nightly 1.16 test Drop the oldest and add the newest running release branch to nightly builds. * Add writeAuditRPCEvent to agent_oss (#17607) * Add writeAuditRPCEvent to agent_oss * fix the other diffs * backport change log * Add Envoy and Consul version constraints to Envoy extensions (#17612) * [API Gateway] Fix trust domain for external peered services in synthesis code (#17609) * [API Gateway] Fix trust domain for external peered services in synthesis code * Add changelog * backport ent changes to oss (#17614) * backport ent changes to oss * Update .changelog/_5669.txt Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> --------- Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> * Update intentions.mdx (#17619) Make behaviour of L7 intentions clearer * enterprise changelog update for audit (#17625) * Update list of Envoy versions (#17546) * [API Gateway] Fix rate limiting for API gateways (#17631) * [API Gateway] Fix rate limiting for API gateways * Add changelog * Fix failing unit tests * Fix operator usage tests for api package * sort some imports that are wonky between oss and ent (#17637) * PmTLS and tproxy improvements with failover and L7 traffic mgmt for k8s (#17624) * porting over changes from enterprise repo to oss * applied feedback on service mesh for k8s overview * fixed typo * removed ent-only build script file * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * Delete check-legacy-links-format.yml (#17647) * docs: Reference doc updates for permissive mTLS settings (#17371) * Reference doc updates for permissive mTLS settings * Document config entry filtering * Fix minor doc errors (double slashes in link url paths) --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add generic experiments configuration and use it to enable catalog v2 resources (#17604) * Add generic experiments configuration and use it to enable catalog v2 resources * Run formatting with -s as CI will validate that this has been done * api-gateway: stop adding all header filters to virtual host when generating xDS (#17644) * Add header filter to api-gateway xDS golden test * Stop adding all header filters to virtual host when generating xDS for api-gateway * Regenerate xDS golden file for api-gateway w/ header filter * fix: add agent info reporting log (#17654) * Add new Consul 1.16 docs (#17651) * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * 
Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * fix build errors --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Default `ProxyType` for builtin extensions (#17657) * Post 1.16.0-rc1 updates (#17663) - Update changelog to include new entries from release - Update submodule versions to latest published * Update service-defaults.mdx (#17656) * docs: Sameness Groups (#17628) * port from enterprise branch * Apply suggestions from code review Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> * Update website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx * next steps * Update website/content/docs/connect/cluster-peering/usage/create-sameness-groups.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/k8s/connect/cluster-peering/usage/create-sameness-groups.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Remove "BETA" marker from config entries (#17670) * CAPIgw for K8s installation updates for 1.16 (#17627) * trimmed CRD step and reqs from installation * updated tech specs * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * added upgrade instruction * removed tcp port req * described downtime and DT-less upgrades * applied additional review feedback --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * additional feedback on API gateway upgrades (#17677) * additional feedback * Update website/content/docs/api-gateway/upgrades.mdx Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> --------- Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * docs: JWT Authorization for intentions (#17643) * Initial page/nav creation * configuration entry reference page * Usage + fixes * service intentions page * usage * description * config entry updates * formatting fixes * Update website/content/docs/connect/config-entries/service-intentions.mdx Co-authored-by: Paul Glass <pglass@hashicorp.com> * service intentions review fixes * Overview page review fixes * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Paul Glass <pglass@hashicorp.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: minor fixes to JWT auth docs (#17680) * Fixes * service intentions fixes * Fix two WAL metrics in docs/agent/telemetry.mdx (#17593) * updated failover for k8s w-tproxy page title (#17683) * Add release notes 1.16 rc (#17665) * Merge pull request #5773 from hashicorp/docs/rate-limiting-from-ip-addresses-1.16 updated docs for rate limiting for IP addresses - 1.16 * Merge pull request #5609 from hashicorp/docs/enterprise-utilization-reporting Add docs for enterprise utilization reporting * Merge pull request #5734 from hashicorp/docs/envoy-ext-1.16 Docs/envoy ext 1.16 * Add release notes for 1.16-rc * Add consul-e license utlization reporting * Update with rc absolute links * Update with rc absolute links * fix typo * Apply 
suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update to use callout component * address typo * docs: FIPS 140-2 Compliance (#17668) * Page + nav + formatting * link fix * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * link fix * Apply suggestions from code review Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * Update website/content/docs/enterprise/fips.mdx --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * fix apigw install values file * fix typos in release notes --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> * fix release notes links (#17687) * adding redirects for tproxy and envoy extensions (#17688) * adding redirects * Apply suggestions from code review * Fix FIPS copy (#17691) * fix release notes links * fix typos on fips docs * [NET-4107][Supportability] Log Level set to TRACE and duration set to 5m for consul-debug (#17596) * changed duration to 5 mins and log level to trace * documentation update * change log * ENT merge of ext-authz extension updates (#17684) * docs: Update default values for Envoy extension proxy types (#17676) * fix: stop peering delete routine on leader loss (#17483) * Refactor disco chain prioritize by locality structs (#17696) This includes prioritize by localities on disco chain targets rather than resolvers, allowing different targets within the same partition to have different policies. * agent: remove agent cache dependency from service mesh leaf certificate management (#17075) * agent: remove agent cache dependency from service mesh leaf certificate management This extracts the leaf cert management from within the agent cache. This code was produced by the following process: 1. All tests in agent/cache, agent/cache-types, agent/auto-config, agent/consul/servercert were run at each stage. - The tests in agent matching .*Leaf were run at each stage. - The tests in agent/leafcert were run at each stage after they existed. 2. 
The former leaf cert Fetch implementation was extracted into a new package behind a "fake RPC" endpoint to make it look almost like all other cache type internals. 3. The old cache type was shimmed to use the fake RPC endpoint and generally cleaned up. 4. I selectively duplicated all of Get/Notify/NotifyCallback/Prepopulate from the agent/cache.Cache implementation over into the new package. This was renamed as leafcert.Manager. - Code that was irrelevant to the leaf cert type was deleted (inlining blocking=true, refresh=false) 5. Everything that used the leaf cert cache type (including proxycfg stuff) was shifted to use the leafcert.Manager instead. 6. agent/cache-types tests were moved and gently replumbed to execute as-is against a leafcert.Manager. 7. Inspired by some of the locking changes from derek's branch I split the fat lock into N+1 locks. 8. The waiter chan struct{} was eventually replaced with a singleflight.Group around cache updates, which was likely the biggest net structural change. 9. The awkward two layers or logic produced as a byproduct of marrying the agent cache management code with the leaf cert type code was slowly coalesced and flattened to remove confusion. 10. The .*Leaf tests from the agent package were copied and made to work directly against a leafcert.Manager to increase direct coverage. I have done a best effort attempt to port the previous leaf-cert cache type's tests over in spirit, as well as to take the e2e-ish tests in the agent package with Leaf in the test name and copy those into the agent/leafcert package to get more direct coverage, rather than coverage tangled up in the agent logic. There is no net-new test coverage, just coverage that was pushed around from elsewhere. * [core]: Pin github action workflows (#17695) * docs: missing changelog for _5517 (#17706) * add enterprise notes for IP-based rate limits (#17711) * add enterprise notes for IP-based rate limits * Apply suggestions from code review Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * added bolded 'Enterprise' in list items. 
--------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * Update compatibility.mdx (#17713) * Remove extraneous version info for Config entries (#17716) * Update terminating-gateway.mdx * Update exported-services.mdx * Update mesh.mdx * fix: typo in link to section (#17527) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Bump Alpine to 3.18 (#17719) * Update Dockerfile * Create 17719.txt * NET-1825: New ACL token creation docs (#16465) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * [NET-3865] [Supportability] Additional Information in the output of 'consul operator raft list-peers' (#17582) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * OSS merge: Update error handling login when applying extensions (#17740) * Bump atlassian/gajira-transition from 3.0.0 to 3.0.1 (#17741) Bumps [atlassian/gajira-transition](https://github.com/atlassian/gajira-transition) from 3.0.0 to 3.0.1. - [Release notes](https://github.com/atlassian/gajira-transition/releases) - [Commits](https://github.com/atlassian/gajira-transition/compare/4749176faf14633954d72af7a44d7f2af01cc92b...38fc9cd61b03d6a53dd35fcccda172fe04b36de3) --- updated-dependencies: - dependency-name: atlassian/gajira-transition dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add truncation to body (#17723) * docs: Failover overview minor fix (#17743) * Incorrect symbol * Clarification * slight edit for clarity * docs - update Envoy and Dataplane compat matrix (#17752) * Update envoy.mdx added more detail around default versus other compatible versions * validate localities on agent configs and registration endpoints (#17712) * Updated docs added explanation. (#17751) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs * explanation added --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * Update index.mdx (#17749) * added redirects and updated links (#17764) * Add transparent proxy enhancements changelog (#17757) * docs - remove use of consul leave during upgrade instructions (#17758) * Fix issue with streaming service health watches. (#17775) Fix issue with streaming service health watches. This commit fixes an issue where the health streams were unaware of service export changes. 
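// Builtin policies (for example the global-management policy and the newer
// global-read-only policy) are expected to remain globally applicable, so any
// attempt to scope one to a subset of datacenters is rejected outright.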
return fmt.Errorf("Changing the Datacenters of the %s policy is not permitted", builtinPolicy.Name)
}
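// Likewise, reject any change to a built-in policy's rules; those rules
// are managed by Consul itself.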
if policy.Rules != idMatch.Rules {
* [OSS] Improve xDS Code Coverage - Endpoints and Misc (#18222) test: improve xDS endpoints code coverage * Clarify license reporting timing and GDPR compliance (#18237) Add Alicia's edits to clarify log timing and other details * Fix Github Workflow File (#18241) * [CONSUL-382] Support openssl in unique test dockerfile (#43) * [CONSUL-405] Add bats to single container (#44) * [CONSUL-414] Run Prometheus Test Cases and Validate Changes (#46) * [CONSUL-410] Run Jaeger in Single container (#45) * [CONSUL-412] Run test-sds-server in single container (#48) * [CONSUL-408] Clean containers (#47) * [CONSUL-384] Rebase and sync fork (#50) * [CONSUL-415] Create Scenarios Troubleshooting Docs (#49) * [CONSUL-417] Update Docs Single Container (#51) * [CONSUL-428] Add Socat to single container (#54) * [CONSUL-424] Replace pkill in kill_envoy function (#52) * [CONSUL-434] Modify Docker run functions in Helper script (#53) * [CONSUL-435] Replace docker run in set_ttl_check_state & wait_for_agent_service_register functions (#55) * [CONSUL-438] Add netcat (nc) in the Single container Dockerfile (#56) * [CONSUL-429] Replace Docker run with Docker exec (#57) * [CONSUL-436] Curl timeout and run tests (#58) * [CONSUL-443] Create dogstatsd Function (#59) * [CONSUL-431] Update Docs Netcat (#60) * [CONSUL-439] Parse nc Command in function (#61) * [CONSUL-463] Review curl Exec and get_ca_root Func (#63) * [CONSUL-453] Docker hostname in Helper functions (#64) * [CONSUL-461] Test wipe volumes without extra cont (#66) * [CONSUL-454] Check ports in the Server and Agent containers (#65) * [CONSUL-441] Update windows dockerfile with version (#62) * [CONSUL-466] Review case-grpc Failing Test (#67) * [CONSUL-494] Review case-cfg-resolver-svc-failover (#68) * [CONSUL-496] Replace docker_wget & docker_curl (#69) * [CONSUL-499] Cleanup Scripts - Remove nanoserver (#70) * [CONSUL-500] Update Troubleshooting Docs (#72) * [CONSUL-502] Pull & Tag Envoy Windows Image (#73) * [CONSUL-504] Replace docker run in docker_consul (#76) * [CONSUL-505] Change admin_bind * [CONSUL-399] Update envoy to 1.23.1 (#78) * [CONSUL-510] Support case-wanfed-gw on Windows (#79) * [CONSUL-506] Update troubleshooting Documentation (#80) * [CONSUL-512] Review debug_dump_volumes Function (#81) * [CONSUL-514] Add zipkin to Docker Image (#82) * [CONSUL-515] Update Documentation (#83) * [CONSUL-529] Support case-consul-exec (#86) * [CONSUL-530] Update Documentation (#87) * [CONSUL-530] Update default consul version 1.13.3 * [CONSUL-539] Cleanup (#91) * [CONSUL-546] Scripts Clean-up (#92) * [CONSUL-491] Support admin_access_log_path value for Windows (#71) * [CONSUL-519] Implement mkfifo Alternative (#84) * [CONSUL-542] Create OS Specific Files for Envoy Package (#88) * [CONSUL-543] Create exec_supported.go (#89) * [CONSUL-544] Test and Build Changes (#90) * Implement os.DevNull * using mmap instead of disk files * fix import in exec-unix * fix nmap open too many arguemtn * go fmt on file * changelog file * fix go mod * Update .changelog/17694.txt Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * different mmap library * fix bootstrap json * some fixes * chocolatey version fix and image fix * using different library * fix Map funciton call * fix mmap call * fix tcp dump * fix tcp dump * windows tcp dump * Fix docker run * fix tests * fix go mod * fix version 16.0 * fix version * fix version dev * sleep to debug * fix sleep * fix permission issue * fix permission issue * fix permission issue * fix command * fix command * fix funciton * fix assert config 
entry status command not found * fix command not found assert_cert_has_cn * fix command not found assert_upstream_missing * fix command not found assert_upstream_missing_once * fix command not found get_upstream_endpoint * fix command not found get_envoy_public_listener_once * fix command not found * fix test cases * windows integration test workflow github * made code similar to unix using npipe * fix go.mod * fix dialing of npipe * dont wait * check size of written json * fix undefined n * running * fix dep * fix syntax error * fix workflow file * windows runner * fix runner * fix from json * fix runs on * merge connect envoy * fix cin path * build * fix file name * fix file name * fix dev build * remove unwanted code * fix upload * fix bin name * fix path * checkout current branch * fix path * fix tests * fix shell bash for windows sh files * fix permission of run-test.sh * removed docker dev * added shell bash for tests * fix tag * fix win=true * fix cd * added dev * fix variable undefined * removed failing tests * fix tcp dump image * fix curl * fix curl * tcp dump path * fix tcpdump path * fix curl * fix curl install * stop removing intermediate containers * fix tcpdump docker image * revert -rm * --rm=false * makeing docker image before * fix tcpdump * removed case consul exec * removed terminating gateway simple * comment case wasm * removed data dog * comment out upload coverage * uncomment case-consul-exec * comment case consul exec * if always * logs * using consul 1.17.0 * fix quotes * revert quotes * redirect to dev null * Revert version * revert consul connect * fix version * removed envoy connect * not using function * change log * docker logs * fix logs * restructure bad authz * rmeoved dev null * output * fix file descriptor * fix cacert * fix cacert * fix ca cert * cacert does not work in windows curl * fix func * removed docker logs * added sleep * fix tls * commented case-consul-exec * removed echo * retry docker consul * fix upload bin * uncomment consul exec * copying consul.exe to docker image * copy fix * fix paths * fix path * github workspace path * latest version * Revert "latest version" This reverts commit 5a7d7b82d9e7553bcb01b02557ec8969f9deba1d. * commented consul exec * added ssl revoke best effort * revert best effort * removed unused files * rename var name and change dir * windows runner * permission * needs setup fix * swtich to github runner * fix file path * fix path * fix path * fix path * fix path * fix path * fix build paths * fix tag * nightly runs * added matrix in github workflow, renamed files * fix job * fix matrix * removed brackes * from json * without using job matrix * fix quotes * revert job matrix * fix workflow * fix comment * added comment * nightly runs * removed datadog ci as it is already measured in linux one * running test * Revert "running test" This reverts commit 7013d15a23732179d18ec5d17336e16b26fab5d4. 
* pr comment fixes * running test now * running subset of test * running subset of test * job matrix * shell bash * removed bash shell * linux machine for job matrix * fix output * added cat to debug * using ubuntu latest * fix job matrix * fix win true * fix go test * revert job matrix * Fix tests --------- Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: joselo85 <joseignaciolorenzo85@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236) * Align build arch matrix with enterprise (#18235) Ensure that OSS remains in sync w/ Enterprise by aligning the format of arch matrix args for various build jobs. * Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes" (#18248) Revert "NET-4996 - filter go-tests and test-integration workflows from running on docs only and ui only changes (#18236)" This reverts commit a11dba710e6ce6f172c0fa6c9b61567cc1efffc8. * resource: Add scope to resource type registration [NET-4976] (#18214) Enables querying a resource type's registration to determine if a resource is cluster, partition, or partition and namespace scoped. * Fix some inconsistencies in jwt docs (#18234) * NET-1825: More new ACL token creation docs (#18063) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [CC-5719] Add support for builtin global-read-only policy * Add changelog * Add read-only to docs * Fix some minor issues. * Change from ReplaceAll to Sprintf * Change IsValidPolicy name to return an error instead of bool * Fix PolicyList test * Fix other tests * Apply suggestions from code review Co-authored-by: Paul Glass <pglass@hashicorp.com> * Fix state store test for policy list. * Fix naming issues * Update acl/validation.go Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * backport of commit d63fa5481dc02c6faae7cc2647b4073b3286af1d * backport of commit 3d099a6ed8ed10b6dc464c466cb1668914db8f08 --------- Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: Dan Bond <danbond@protonmail.com> Signed-off-by: josh <josh.timmons@hashicorp.com> Co-authored-by: Michael Zalimeni <michael.zalimeni@hashicorp.com> Co-authored-by: Ronald <roncodingenthusiast@users.noreply.github.com> Co-authored-by: Eric Haberkorn <erichaberkorn@gmail.com> Co-authored-by: Andrew Stucki <andrew.stucki@hashicorp.com> Co-authored-by: Luke Kysow <1034429+lkysow@users.noreply.github.com> Co-authored-by: R.B. 
Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: Bryce Kalow <bkalow@hashicorp.com> Co-authored-by: Paul Glass <pglass@hashicorp.com> Co-authored-by: Matt Keeler <mkeeler@users.noreply.github.com> Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> Co-authored-by: Poonam Jadhav <poonam.jadhav@hashicorp.com> Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> Co-authored-by: Hariram Sankaran <56744845+ramramhariram@users.noreply.github.com> Co-authored-by: shanafarkas <105076572+shanafarkas@users.noreply.github.com> Co-authored-by: Thomas Eckert <teckert@hashicorp.com> Co-authored-by: Jeff Apple <79924108+Jeff-Apple@users.noreply.github.com> Co-authored-by: Joshua Timmons <josh.timmons@hashicorp.com> Co-authored-by: Ashesh Vidyut <134911583+absolutelightning@users.noreply.github.com> Co-authored-by: Dan Stough <dan.stough@hashicorp.com> Co-authored-by: Curt Bushko <cbushko@gmail.com> Co-authored-by: Tobias Birkefeld <t@craxs.de> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Co-authored-by: Semir Patel <semir.patel@hashicorp.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chappie <6537530+chapmanc@users.noreply.github.com> Co-authored-by: Derek Menteer <105233703+hashi-derek@users.noreply.github.com> Co-authored-by: John Murret <john.murret@hashicorp.com> Co-authored-by: Mark Campbell-Vincent <mnmvincent@gmail.com> Co-authored-by: Daniel Upton <daniel@floppy.co> Co-authored-by: Steven Zamborsky <97125550+stevenzamborsky@users.noreply.github.com> Co-authored-by: George Bolo <george.bolo@gmail.com> Co-authored-by: Chris S. Kim <ckim@hashicorp.com> Co-authored-by: wangxinyi7 <121973291+wangxinyi7@users.noreply.github.com> Co-authored-by: cskh <hui.kang@hashicorp.com> Co-authored-by: V. K <cn007b@gmail.com> Co-authored-by: Iryna Shustava <ishustava@users.noreply.github.com> Co-authored-by: Alex Simenduev <shamil.si@gmail.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> Co-authored-by: Dan Bond <danbond@protonmail.com> Co-authored-by: sarahalsmiller <100602640+sarahalsmiller@users.noreply.github.com> Co-authored-by: Gerard Nguyen <gerard@hashicorp.com> Co-authored-by: mr-miles <miles.waller@gmail.com> Co-authored-by: natemollica-dev <57850649+natemollica-nm@users.noreply.github.com> Co-authored-by: John Maguire <john.maguire@hashicorp.com> Co-authored-by: Samantha <hello@entropy.cat> Co-authored-by: Ranjandas <thejranjan@gmail.com> Co-authored-by: Evan Phoenix <evan@phx.io> Co-authored-by: Michael Hofer <karras@users.noreply.github.com> Co-authored-by: J.C. 
Jones <james.jc.jones@gmail.com> Co-authored-by: Fulvio <fulviodenza823@gmail.com> Co-authored-by: Krastin Krastev <krastin@hashicorp.com> Co-authored-by: david3a <49253132+david3a@users.noreply.github.com> Co-authored-by: Nick Irvine <115657443+nfi-hashicorp@users.noreply.github.com> Co-authored-by: Tom Davies <tom@t-davies.com> Co-authored-by: Vijay <vijayraghav22@gmail.com> Co-authored-by: Eddie Rowe <74205376+eddie-rowe@users.noreply.github.com> Co-authored-by: emilymianeil <eneil@hashicorp.com> Co-authored-by: nv-hashi <80716011+nv-hashi@users.noreply.github.com> Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo <joseignaciolorenzo85@gmail.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> Co-authored-by: NiniOak <anita.akaeze@hashicorp.com> Co-authored-by: hashicorp-tsccr[bot] <129506189+hashicorp-tsccr[bot]@users.noreply.github.com> Co-authored-by: hashicorp-tsccr[bot] <hashicorp-tsccr[bot]@users.noreply.github.com> Co-authored-by: Blake Covarrubias <blake@covarrubi.as> Co-authored-by: Ivan K Berlot <ivanberlot@gmail.com> Co-authored-by: Ezequiel Fernández Ponce <20102608+ezfepo@users.noreply.github.com> Co-authored-by: Ezequiel Fernández Ponce <ezequiel.fernandez@southworks.com> Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com> Co-authored-by: Jeremy Jacobson <jjacobson93@users.noreply.github.com> Co-authored-by: lornasong <lornasong@users.noreply.github.com> Co-authored-by: Judith Malnick <judith@hashicorp.com> Co-authored-by: Jeremy Jacobson <jeremy.jacobson@hashicorp.com>
2023-08-01 17:37:13 +00:00
return fmt.Errorf("Changing the Rules for the builtin %s policy is not permitted", builtinPolicy.Name)
}
}
}
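
	// NOTE: the guard above makes builtin policies (such as global-management)
	// effectively read-only: any request that changes their Rules is rejected,
	// while metadata such as the Description may still be edited.
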
	// validate the rules
	_, err = acl.NewPolicyFromSource(policy.Rules, a.srv.aclConfig, policy.EnterprisePolicyMeta())
	if err != nil {
		return err
	}

	// validate the enterprise specific fields
	if err = a.policyUpsertValidateEnterprise(policy, idMatch); err != nil {
		return err
	}

	// calculate the hash for this policy
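	// The hash is persisted on the policy and lets other code paths (for
	// example ACL replication) detect content changes without comparing rules.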
	policy.SetHash(true)

	req := &structs.ACLPolicyBatchSetRequest{
		Policies: structs.ACLPolicies{policy},
	}
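
	// Commit the single-policy batch through Raft so that every server
	// applies the same update to its state store.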
	_, err = a.srv.raftApply(structs.ACLPolicySetRequestType, req)
	if err != nil {
		return fmt.Errorf("Failed to apply policy upsert request: %v", err)
	}

	// Remove from the cache to prevent stale cache usage
	a.srv.ACLResolver.cache.RemovePolicy(policy.ID)
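
	// Best-effort read-back of the policy from the local state store so the
	// reply reflects what was committed; a failed read is intentionally not
	// treated as an error here.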
	if _, policy, err := a.srv.fsm.State().ACLPolicyGetByID(nil, policy.ID, &policy.EnterpriseMeta); err == nil && policy != nil {
		*reply = *policy
	}

	return nil
}
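
// PolicyDelete removes a single ACL policy by ID and backs the HTTP endpoint
// DELETE /v1/acl/policy/:id. A minimal, illustrative client-side sketch using
// the api package ("github.com/hashicorp/consul/api"), assuming a reachable
// agent, a token granting acl:write, and a placeholder policyID:
//
//	client, err := api.NewClient(api.DefaultConfig())
//	if err != nil {
//		return err
//	}
//	// Deleting a nonexistent policy surfaces the not-found error below.
//	_, err = client.ACL().PolicyDelete(policyID, nil)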
func (a *ACL) PolicyDelete(args *structs.ACLPolicyDeleteRequest, reply *string) error {
	if err := a.aclPreCheck(); err != nil {
		return err
	}

	if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
		return err
	}
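
	// Policy writes must happen in the primary datacenter; retarget the
	// request so the forwarding check below sends it there.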
	if !a.srv.InPrimaryDatacenter() {
		args.Datacenter = a.srv.config.PrimaryDatacenter
	}
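
	// Forward to the current leader, or to the primary datacenter, when this
	// server cannot service the write itself.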
	if done, err := a.srv.ForwardRPC("ACL.PolicyDelete", args, reply); done {
		return err
	}

	defer metrics.MeasureSince([]string{"acl", "policy", "delete"}, time.Now())

	// Verify token is permitted to modify ACLs
	var authzContext acl.AuthorizerContext

	if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
		return err
	} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
		return err
	}
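
	// Look the policy up first so that deleting a nonexistent policy fails
	// with a clear not-found error below.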
	_, policy, err := a.srv.fsm.State().ACLPolicyGetByID(nil, args.PolicyID, &args.EnterpriseMeta)
	if err != nil {
		return err
	}

	if policy == nil {
		if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
			return fmt.Errorf("policy not found in namespace %s: %w", ns, acl.ErrNotFound)
		}
		return fmt.Errorf("policy does not exist: %w", acl.ErrNotFound)
	}
The former leaf cert Fetch implementation was extracted into a new package behind a "fake RPC" endpoint to make it look almost like all other cache type internals. 3. The old cache type was shimmed to use the fake RPC endpoint and generally cleaned up. 4. I selectively duplicated all of Get/Notify/NotifyCallback/Prepopulate from the agent/cache.Cache implementation over into the new package. This was renamed as leafcert.Manager. - Code that was irrelevant to the leaf cert type was deleted (inlining blocking=true, refresh=false) 5. Everything that used the leaf cert cache type (including proxycfg stuff) was shifted to use the leafcert.Manager instead. 6. agent/cache-types tests were moved and gently replumbed to execute as-is against a leafcert.Manager. 7. Inspired by some of the locking changes from derek's branch I split the fat lock into N+1 locks. 8. The waiter chan struct{} was eventually replaced with a singleflight.Group around cache updates, which was likely the biggest net structural change. 9. The awkward two layers or logic produced as a byproduct of marrying the agent cache management code with the leaf cert type code was slowly coalesced and flattened to remove confusion. 10. The .*Leaf tests from the agent package were copied and made to work directly against a leafcert.Manager to increase direct coverage. I have done a best effort attempt to port the previous leaf-cert cache type's tests over in spirit, as well as to take the e2e-ish tests in the agent package with Leaf in the test name and copy those into the agent/leafcert package to get more direct coverage, rather than coverage tangled up in the agent logic. There is no net-new test coverage, just coverage that was pushed around from elsewhere. * [core]: Pin github action workflows (#17695) * docs: missing changelog for _5517 (#17706) * add enterprise notes for IP-based rate limits (#17711) * add enterprise notes for IP-based rate limits * Apply suggestions from code review Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * added bolded 'Enterprise' in list items. 
--------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> Co-authored-by: David Yu <dyu@hashicorp.com> * Update compatibility.mdx (#17713) * Remove extraneous version info for Config entries (#17716) * Update terminating-gateway.mdx * Update exported-services.mdx * Update mesh.mdx * fix: typo in link to section (#17527) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Bump Alpine to 3.18 (#17719) * Update Dockerfile * Create 17719.txt * NET-1825: New ACL token creation docs (#16465) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * [NET-3865] [Supportability] Additional Information in the output of 'consul operator raft list-peers' (#17582) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * OSS merge: Update error handling login when applying extensions (#17740) * Bump atlassian/gajira-transition from 3.0.0 to 3.0.1 (#17741) Bumps [atlassian/gajira-transition](https://github.com/atlassian/gajira-transition) from 3.0.0 to 3.0.1. - [Release notes](https://github.com/atlassian/gajira-transition/releases) - [Commits](https://github.com/atlassian/gajira-transition/compare/4749176faf14633954d72af7a44d7f2af01cc92b...38fc9cd61b03d6a53dd35fcccda172fe04b36de3) --- updated-dependencies: - dependency-name: atlassian/gajira-transition dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add truncation to body (#17723) * docs: Failover overview minor fix (#17743) * Incorrect symbol * Clarification * slight edit for clarity * docs - update Envoy and Dataplane compat matrix (#17752) * Update envoy.mdx added more detail around default versus other compatible versions * validate localities on agent configs and registration endpoints (#17712) * Updated docs added explanation. (#17751) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs * explanation added --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * Update index.mdx (#17749) * added redirects and updated links (#17764) * Add transparent proxy enhancements changelog (#17757) * docs - remove use of consul leave during upgrade instructions (#17758) * Fix issue with streaming service health watches. (#17775) Fix issue with streaming service health watches. This commit fixes an issue where the health streams were unaware of service export changes. 
Whenever an exported-services config entry is modified, it is effectively an ACL change. The bug would be triggered by the following situation: - no services are exported - an upstream watch to service X is spawned - the streaming backend filters out data for service X (due to lack of exports) - service X is finally exported In the situation above, the streaming backend does not trigger a refresh of its data. This means that any events that were supposed to have been received prior to the export are NOT backfilled, and the watches never see service X spawning. We currently have decided to not trigger a stream refresh in this situation due to the potential for a thundering herd effect (touching exports would cause a re-fetch of all watches for that partition, potentially). Therefore, a local blocking-query approach was added by this commit for agentless. It's also worth noting that the streaming subscription is currently bypassed most of the time with agentful, because proxycfg has a `req.Source.Node != ""` which prevents the `streamingEnabled` check from passing. This means that while agents should technically have this same issue, they don't experience it with mesh health watches. Note that this is a temporary fix that solves the issue for proxycfg, but not service-discovery use cases. * Property Override validation improvements (#17759) * Reject inbound Prop Override patch with Services Services filtering is only supported for outbound TrafficDirection patches. * Improve Prop Override unexpected type validation - Guard against additional invalid parent and target types - Add specific error handling for Any fields (unsupported) * Fixes (#17765) * Update license get explanation (#17782) This PR is to clarify what happens if the license get command is run on a follower if the leader hasn't been updated with a newer license. * Add Patch index to Prop Override validation errors (#17777) When a patch is found invalid, include its index for easier debugging when multiple patches are provided. * Stop referenced jwt providers from being deleted (#17755) * Stop referenced jwt providers from being deleted * Implement a Catalog Controllers Lifecycle Integration Test (#17435) * Implement a Catalog Controllers Lifecycle Integration Test * Prevent triggering the race detector. This allows defining some variables for protobuf constants and using those in comparisons. Without that, something internal in the fmt package ended up looking at the protobuf message size cache and triggering the race detector. * HCP Add node id/name to config (#17750) * Catalog V2 Container Based Integration Test (#17674) * Implement the Catalog V2 controller integration container tests This now allows the container tests to import things from the root module. However for now we want to be very restrictive about which packages we allow importing. * Add an upgrade test for the new catalog Currently this should be dormant and not executed. However its put in place to detect breaking changes in the future and show an example of how to do an upgrade test with integration tests structured like catalog v2. * Make testutil.Retry capable of performing cleanup operations These cleanup operations are executed after each retry attempt. * Move TestContext to taking an interface instead of a concrete testing.T This allows this to be used on a retry.R or generally anything that meets the interface. 
* Move to using TestContext instead of background contexts Also this forces all test methods to implement the Cleanup method now instead of that being an optional interface. Co-authored-by: Daniel Upton <daniel@floppy.co> * Fix Docs for Trails Leader By (#17763) * init * fix tests * added -detailed in docs * added change log * fix doc * checking for entry in map * fix tests * removed detailed flag * removed detailed flag * revert unwanted changes * removed unwanted changes * updated change log * pr review comment changes * pr comment changes single API instead of two * fix change log * fix tests * fix tests * fix test operator raft endpoint test * Update .changelog/17582.txt Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * nits * updated docs * explanation added * fix doc * fix docs --------- Co-authored-by: Semir Patel <semir.patel@hashicorp.com> * Improve Prop Override docs examples (#17799) - Provide more realistics examples for setting properties not already supported natively by Consul - Remove superfluous commas from HCL, correct target service name, and fix service defaults vs. proxy defaults in examples - Align existing integration test to updated docs * Test permissive mTLS filter chain not configured with tproxy disabled (#17747) * Add documentation for remote debugging of integration tests. (#17800) * Add documentation for remote debugging of integration tests. * add link from main docs page. * changes related to PR feedback * Clarify limitations of Prop Override extension (#17801) Explicitly document the limitations of the extension, particularly what kind of fields it is capable of modifying. * Fix formatting for webhook-certs Consul tutorial (#17810) * Fix formatting for webhook-certs Consul tutorial * Make a small grammar change to also pick up whitespace changes necessary for formatting --------- Co-authored-by: David Yu <dyu@hashicorp.com> * Add jwt-authn metrics to jwt-provider docs (#17816) * [NET-3095] add jwt-authn metrics docs * Change URLs for redirects from RC to default latest (#17822) * Set GOPRIVATE for all hashicorp repos in CI (#17817) Consistently set GOPRIVATE to include all hashicorp repos, s.t. private modules are successfully pulled in enterprise CI. * Make locality aware routing xDS changes (#17826) * Fixup consul-container/test/debugging.md (#17815) Add missing `-t` flag and fix minor typo. * fixes #17732 - AccessorID in request body should be optional when updating ACL token (#17739) * AccessorID in request body should be optional when updating ACL token * add a test case * fix test case * add changelog entry for PR #17739 * CA provider doc updates and Vault provider minor update (#17831) Update CA provider docs Clarify that providers can differ between primary and secondary datacenters Provide a comparison chart for consul vs vault CA providers Loosen Vault CA provider validation for RootPKIPath Update Vault CA provider documentation * ext-authz Envoy extension: support `localhost` as a valid target URI. (#17821) * CI Updates (#17834) * Ensure that git access to private repos uses the ELEVATED_GITHUB_TOKEN * Bump the runner size for the protobuf generation check This has failed previously when the runner process that communicates with GitHub gets starved causing the job to fail. 
* counter part of ent pr (#17618) * watch: support -filter for consul watch: checks, services, nodes, service (#17780) * watch: support -filter for watch checks * Add filter for watch nodes, services, and service - unit test added - Add changelog - update doc * Trigger OSS => ENT merge for all release branches (#17853) Previously, this only triggered for release/*.*.x branches; however, our release process involves cutting a release/1.16.0 branch, for example, at time of code freeze these days. Any PRs to that branch after code freeze today do not make their way to consul-enterprise. This will make behavior for a .0 branch consistent with current behavior for a .x branch. * Update service-mesh.mdx (#17845) Deleted two commas which looks quite like some leftovers. * Add docs for sameness groups with resolvers. (#17851) * docs: add note about path prefix matching behavior for HTTPRoute config (#17860) * Add note about path prefix matching behavior for HTTPRoute config * Update website/content/docs/connect/gateways/api-gateway/configuration/http-route.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: update upgrade to consul-dataplane docs on k8s (#17852) * resource: add `AuthorizerContext` helper method (#17393) * resource: enforce consistent naming of resource types (#17611) For consistency, resource type names must follow these rules: - `Group` must be snake case, and in most cases a single word. - `GroupVersion` must be lowercase, start with a "v" and end with a number. - `Kind` must be pascal case. These were chosen because they map to our protobuf type naming conventions. * tooling: generate protoset file (#17364) Extends the `proto` make target to generate a protoset file for use with grpcurl etc. * Fix a bug that wrongly trims domains when there is an overlap with DC name (#17160) * Fix a bug that wrongly trims domains when there is an overlap with DC name Before this change, when DC name and domain/alt-domain overlap, the domain name incorrectly trimmed from the query. Example: Given: datacenter = dc-test, alt-domain = test.consul. Querying for "test-node.node.dc-test.consul" will faile, because the code was trimming "test.consul" instead of just ".consul" This change, fixes the issue by adding dot (.) 
before trimming * trimDomain: ensure domain trimmed without modyfing original domains * update changelog --------- Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * deps: aws-sdk-go v1.44.289 (#17876) Signed-off-by: Dan Bond <danbond@protonmail.com> * api-gateway: add operation cannot be fulfilled error to common errors (#17874) * add error message * Update website/content/docs/api-gateway/usage/errors.mdx Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * fix formating issues --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * api-gateway: add step to upgrade instructions for creating intentions (#17875) * Changelog - add 1.13.9, 1.14.8, and 1.15.4 (#17889) * docs: update config enable_debug (#17866) * update doc for config enable_debug * Update website/content/docs/agent/config/config-files.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Update wording on WAN fed and intermediate_pki_path (#17850) * Allow service identity tokens the ability to read jwt-providers (#17893) * Allow service identity tokens the ability to read jwt-providers * more tests * service_prefix tests * Update docs (#17476) Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add emit_tags_as_labels to envoy bootstrap config when using Consul Telemetry Collector (#17888) * Fix command from kg to kubectl get (#17903) * Create and update release notes for 1.16 and 1.2 (#17895) * update release notes for 1.16 and 1.2 * update latest consul core release * Propose new changes to APIgw upgrade instructions (#17693) * Propose new changes to APIgw upgrade instructions * fix build error * update callouts to render correctly * Add hideClipboard to log messages * Added clarification around consul k8s and crds * Add workflow to verify linux release packages (#17904) * adding docker files to verify linux packages. * add verifr-release-linux.yml * updating name * pass inputs directly into jobs * add other linux package platforms * remove on push * fix TARGETARCH on debian and ubuntu so it can check arm64 and amd64 * fixing amazon to use the continue line * add ubuntu i386 * fix comment lines * working * remove commented out workflow jobs * Apply suggestions from code review Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * update fedora and ubuntu to use latest tag --------- Co-authored-by: Nathan Coleman <nathan.coleman@hashicorp.com> * Reference hashicorp/consul instead of consul for Docker image (#17914) * Reference hashicorp/consul instead of consul for Docker image * Update Make targets that pull consul directly * Update Consul K8s Upgrade Doc Updates (#17921) Updating upgrade procedures to encompass expected errors during upgrade process from v1.13.x to v1.14.x. * Update sameness-group.mdx (#17915) * Update create-sameness-groups.mdx (#17927) * deps: coredns v1.10.1 (#17912) * Ensure RSA keys are at least 2048 bits in length (#17911) * Ensure RSA keys are at least 2048 bits in length * Add changelog * update key length check for FIPS compliance * Fix no new variables error and failing to return when error exists from validating * clean up code for better readability * actually return value * tlsutil: Fix check TLS configuration (#17481) * tlsutil: Fix check TLS configuration * Rewording docs. 
* Update website/content/docs/services/configuration/checks-configuration-reference.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Fix typos and add changelog entry. --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Deprecations for connect-native SDK and specific connect native APIs (#17937) * Update v1_16_x.mdx * Update connect native golang page --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Revert "Add workflow to verify linux release packages (#17904)" (#17942) This reverts commit 3368f14fab500ebe9f6aeab5631dd1d5f5a453e5. * Fixes Secondary ConnectCA update (#17846) This fixes a bug that was identified which resulted in subsequent ConnectCA configuration update not to persist in the cluster. * fixing typo in link to jwt-validations-with-intentions doc (#17955) * Fix streaming backend link (#17958) * Fix streaming backend link * Update health.mdx * Dynamically create jwks clusters for jwt-providers (#17944) * website: remove deprecated agent rpc docs (#17962) * Fix missing BalanceOutboundConnections in v2 catalog. (#17964) * feature - [NET - 4005] - [Supportability] Reloadable Configuration - enable_debug (#17565) * # This is a combination of 9 commits. # This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same * # This is a combination of 12 commits. 
# This is the 1st commit message: init without tests # This is the commit message #2: change log # This is the commit message #3: fix tests # This is the commit message #4: fix tests # This is the commit message #5: added tests # This is the commit message #6: change log breaking change # This is the commit message #7: removed breaking change # This is the commit message #8: fix test # This is the commit message #9: keeping the test behaviour same # This is the commit message #10: made enable debug atomic bool # This is the commit message #11: fix lint # This is the commit message #12: fix test true enable debug * parent 10f500e895d92cc3691ade7b74a33db755d22039 author absolutelightning <ashesh.vidyut@hashicorp.com> 1687352587 +0530 committer absolutelightning <ashesh.vidyut@hashicorp.com> 1687352592 +0530 init without tests change log fix tests fix tests added tests change log breaking change removed breaking change fix test keeping the test behaviour same made enable debug atomic bool fix lint fix test true enable debug using enable debug in agent as atomic bool test fixes fix tests fix tests added update on correct locaiton fix tests fix reloadable config enable debug fix tests fix init and acl 403 * revert commit * Fix formatting codeblocks on APIgw docs (#17970) * fix formatting codeblocks * remove unnecessary indents * Remove POC code (#17974) * update doc (#17910) * update doc * update link * Remove duplicate and unused newDecodeConfigEntry func (#17979) * docs: samenessGroup YAML examples (#17984) * configuration entry syntax * Example config * Add changelog entry for 1.16.0 (#17987) * Fix typo (#17198) servcies => services * Expose JWKS cluster config through JWTProviderConfigEntry (#17978) * Expose JWKS cluster config through JWTProviderConfigEntry * fix typos, rename trustedCa to trustedCA * Integration test for ext-authz Envoy extension (#17980) * Fix incorrect protocol for transparent proxy upstreams. (#17894) This PR fixes a bug that was introduced in: https://github.com/hashicorp/consul/pull/16021 A user setting a protocol in proxy-defaults would cause tproxy implicit upstreams to not honor the upstream service's protocol set in its `ServiceDefaults.Protocol` field, and would instead always use the proxy-defaults value. Due to the fact that upstreams configured with "tcp" can successfully contact upstream "http" services, this issue was not recognized until recently (a proxy-defaults with "tcp" and a listening service with "http" would make successful requests, but not the opposite). As a temporary work-around, users experiencing this issue can explicitly set the protocol on the `ServiceDefaults.UpstreamConfig.Overrides`, which should take precedence. The fix in this PR removes the proxy-defaults protocol from the wildcard upstream that tproxy uses to configure implicit upstreams. When the protocol was included, it would always overwrite the value during discovery chain compilation, which was not correct. The discovery chain compiler also consumes proxy defaults to determine the protocol, so simply excluding it from the wildcard upstream config map resolves the issue. 
* feat: include nodes count in operator usage endpoint and cli command (#17939) * feat: update operator usage api endpoint to include nodes count * feat: update operator usange cli command to includes nodes count * [OSS] Improve Gateway Test Coverage of Catalog Health (#18011) * fix(cli): remove failing check from 'connect envoy' registration for api gateway * test(integration): add tests to check catalog statsus of gateways on startup * remove extra sleep comment * Update test/integration/consul-container/libs/assert/service.go * changelog * Fixes Traffic rate limitting docs (#17997) * Fix removed service-to-service peering links (#17221) * docs: fix removed service-to-service peering links * docs: extend peering-via-mesh-gateways intro (thanks @trujillo-adam) --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs: Sameness "beta" warning (#18017) * Warning updates * .x * updated typo in tab heading (#18022) * updated typo in tab heading * updated tab group typo, too * Document that DNS lookups can target cluster peers (#17990) Static DNS lookups, in addition to explicitly targeting a datacenter, can target a cluster peer. This was added in 95dc0c7b301b70a6b955a8b7c9737c9b86f03df6 but didn't make the documentation. The driving function for the change is `parseLocality` here: https://github.com/hashicorp/consul/blob/0b1299c28d8127129d61310ee4280055298438e0/agent/dns_oss.go#L25 The biggest change in this is to adjust the standard lookup syntax to tie `.<datacenter>` to `.dc` as required-together, and to append in the similar `.<cluster-peer>.peer` optional argument, both to A record and SRV record lookups. Co-authored-by: David Yu <dyu@hashicorp.com> * Add first integration test for jwt auth with intention (#18005) * fix stand-in text for name field (#18030) * removed sameness conf entry from failover nav (#18033) * docs - add service sync annotations and k8s service weight annotation (#18032) * Docs for https://github.com/hashicorp/consul-k8s/pull/2293 * remove versions for enterprise features since they are old --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * docs - add jobs use case for service mesh k8s (#18037) * docs - add jobs use case for service mesh k8s * add code blocks * address feedback (#18045) * Add verify server hostname to tls default (#17155) * [OSS] Fix initial_fetch_timeout to wait for all xDS resources (#18024) * fix(connect): set initial_fetch_time to wait indefinitely * changelog * PR feedback 1 * ui: fix typos for peer service imports (#17999) * test: fix FIPS inline cert test message (#18076) * Fix a couple typos in Agent Telemetry Metrics docs (#18080) * Fix metrics docs * Add changelog Signed-off-by: josh <josh.timmons@hashicorp.com> --------- Signed-off-by: josh <josh.timmons@hashicorp.com> * docs updates - cluster peering and virtual services (#18069) * Update route-to-virtual-services.mdx * Update establish-peering.mdx * Update service-mesh-compare.mdx (#17279) grammar change * Update helm docs on main (#18085) * ci: use gotestsum v1.10.1 [NET-4042] (#18088) * Docs: Update proxy lifecycle annotations and consul-dataplane flags (#18075) * Update proxy lifecycle annotations and consul-dataplane flags * Pass configured role name to Vault for AWS auth in Connect CA (#17885) * Docs for dataplane upgrade on k8s (#18051) * Docs for dataplane upgrade on k8s --------- Co-authored-by: David Yu <dyu@hashicorp.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * docs - 
update upgrade index page to not recommend consul leave. (#18100) * Displays Consul version of each nodes in UI nodes section (#17754) * update UINodes and UINodeInfo response with consul-version info added as NodeMeta, fetched from serf members * update test cases TestUINodes, TestUINodeInfo * added nil check for map * add consul-version in local agent node metadata * get consul version from serf member and add this as node meta in catalog register request * updated ui mock response to include consul versions as node meta * updated ui trans and added version as query param to node list route * updates in ui templates to display consul version with filter and sorts * updates in ui - model class, serializers,comparators,predicates for consul version feature * added change log for Consul Version Feature * updated to get version from consul service, if for some reason not available from serf * updated changelog text * updated dependent testcases * multiselection version filter * Update agent/consul/state/catalog.go comments updated Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --------- Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> * api gw 1.16 updates (#18081) * api gw 1.16 updates * Apply suggestions from code review Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * update CodeBlockConfig filename * Apply suggestions from code review Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> * remove non-standard intentions page * Update website/content/docs/api-gateway/configuration/index.mdx Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --------- Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * [NET-4103] ci: build s390x (#18067) * ci: build s390x * ci: test s390x * ci: dev build s390x * no GOOS * ent only * build: publish s390x * fix syntax error * fix syntax error again * fix syntax error again x2 * test branch * Move s390x conditionals to step level * remove test branch --------- Co-authored-by: emilymianeil <eneil@hashicorp.com> * :ermahgerd "Sevice Mesh" -> "Service Mesh" (#18116) Just a typo in the docs. * Split pbmesh.UpstreamsConfiguration as a resource out of pbmesh.Upstreams (#17991) Configuration that previously was inlined into the Upstreams resource applies to both explicit and implicit upstreams and so it makes sense to split it out into its own resource. It also has other minor changes: - Renames `proxy.proto` proxy_configuration.proto` - Changes the type of `Upstream.destination_ref` from `pbresource.ID` to `pbresource.Reference` - Adds comments to fields that didn't have them * [NET-4895] ci - api tests and consul container tests error because of dependency bugs with go 1.20.6. Pin go to 1.20.5. (#18124) ### Description The following jobs started failing when go 1.20.6 was released: - `go-test-api-1-19` - `go-test-api-1-20` - `compatibility-integration-tests` - `upgrade-integration-tests` `compatibility-integration-tests` and `compatibility-integration-tests` to this testcontainers issue: https://github.com/testcontainers/testcontainers-go/issues/1359. This issue calls for testcontainers to release a new version when one of their dependencies is fixed. 
When that is done, we will unpin the go versions in `compatibility-integration-tests` and `compatibility-integration-tests`. ### Testing & Reproduction steps See these jobs broken in CI and then see them work with this PR. --------- Co-authored-by: Chris Thain <32781396+cthain@users.noreply.github.com> * Add ingress gateway deprecation notices to docs (#18102) ### Description This adds notices, that ingress gateway is deprecated, to several places in the product docs where ingress gateway is the topic. ### Testing & Reproduction steps Tested with a local copy of the website. ### Links Deprecation of ingress gateway was announced in the Release Notes for Consul 1.16 and Consul-K8s 1.2. See: [https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul/v1_16_x#what-s-deprecated ) [https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated](https://developer.hashicorp.com/consul/docs/release-notes/consul-k8s/v1_2_x#what-s-deprecated) ### PR Checklist * [N/A] updated test coverage * [X] external facing docs updated * [X] appropriate backport labels added * [X] not a security concern --------- Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> * Add docs for jwt cluster configuration (#18004) ### Description <!-- Please describe why you're making this change, in plain English. --> - Add jwt-provider docs for jwks cluster configuration. The configuration was added here: https://github.com/hashicorp/consul/pull/17978 * Docs: fix unmatched bracket for health checks page (#18134) * NET-4657/add resource service client (#18053) ### Description <!-- Please describe why you're making this change, in plain English. --> Dan had already started on this [task](https://github.com/hashicorp/consul/pull/17849) which is needed to start building the HTTP APIs. This just needed some cleanup to get it ready for review. Overview: - Rename `internalResourceServiceClient` to `insecureResourceServiceClient` for name consistency - Configure a `secureResourceServiceClient` with auth enabled ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Fix bug with Vault CA provider (#18112) Updating RootPKIPath but not IntermediatePKIPath would not update leaf signing certs with the new root. Unsure if this happens in practice but manual testing showed it is a bug that would break mesh and agent connections once the old root is pruned. * [NET-4897] net/http host header is now verified and request.host that contains socked now error (#18129) ### Description This is related to https://github.com/hashicorp/consul/pull/18124 where we pinned the go versions in CI to 1.20.5 and 1.19.10. go 1.20.6 and 1.19.11 now validate request host headers for validity, including the hostname cannot be prefixed with slashes. For local communications (npipe://, unix://), the hostname is not used, but we need valid and meaningful hostname. Prior versions go Go would clean the host header, and strip slashes in the process, but go1.20.6 and go1.19.11 no longer do, and reject the host header. Around the community we are seeing that others are intercepting the req.host and if it starts with a slash or ends with .sock, they changing the host to localhost or another dummy value. 
[client: define a "dummy" hostname to use for local connections by thaJeztah · Pull Request #45942 · moby/moby](https://github.com/moby/moby/pull/45942) ### Testing & Reproduction steps Check CI tests. ### Links * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * add a conditional around setting LANFilter.AllSegments to make sure it is valid (#18139) ### Description This is to correct a code problem because this assumes all segments, but when you get to Enterprise, you can be in partition that is not the default partition, in which case specifying all segments does not validate and fails. This is to correct the setting of this filter with `AllSegments` to `true` to only occur when in the the `default` partition. ### Testing & Reproduction steps <!-- * In the case of bugs, describe how to replicate * If any manual tests were done, document the steps and the conditions to replicate * Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding --> ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> ### PR Checklist * [ ] updated test coverage * [ ] external facing docs updated * [ ] appropriate backport labels added * [ ] not a security concern * chore: bump upgrade integrations tests to 1.15, 116 [NET-4743] (#18130) * re org resource type registry (#18133) * fix: update delegateMock used in ENT (#18149) ### Description <!-- Please describe why you're making this change, in plain English. --> The mock is used in `http_ent_test` file which caused lint failures. For OSS->ENT parity adding the same change here. ### Links <!-- Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. --> Identified in OSS->ENT [merge PR](https://github.com/hashicorp/consul-enterprise/pull/6328) ### PR Checklist * [ ] ~updated test coverage~ * [ ] ~external facing docs updated~ * [x] appropriate backport labels added * [ ] ~not a security concern~ * Use JWT-auth filter in metadata mode & Delegate validation to RBAC filter (#18062) ### Description <!-- Please describe why you're making this change, in plain English. --> - Currently the jwt-auth filter doesn't take into account the service identity when validating jwt-auth, it only takes into account the path and jwt provider during validation. This causes issues when multiple source intentions restrict access to an endpoint with different JWT providers. - To fix these issues, rather than use the JWT auth filter for validation, we use it in metadata mode and allow it to forward the successful validated JWT token payload to the RBAC filter which will make the decisions. This PR ensures requests with and without JWT tokens successfully go through the jwt-authn filter. The filter however only forwards the data for successful/valid tokens. On the RBAC filter level, we check the payload for claims and token issuer + existing rbac rules. 
### Testing & Reproduction steps <!-- * In the case of bugs, describe how to replicate * If any manual tests were done, document the steps and the conditions to replicate * Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding --> - This test covers a multi level jwt requirements (requirements at top level and permissions level). It also assumes you have envoy running, you have a redis and a sidecar proxy service registered, and have a way to generate jwks with jwt. I mostly use: https://www.scottbrady91.com/tools/jwt for this. - first write your proxy defaults ``` Kind = "proxy-defaults" name = "global" config { protocol = "http" } ``` - Create two providers ``` Kind = "jwt-provider" Name = "auth0" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjog....." } } ``` ``` Kind = "jwt-provider" Name = "okta" Issuer = "https://ronald.local" JSONWebKeySet = { Local = { JWKS = "eyJrZXlzIjogW3...." } } ``` - add a service intention ``` Kind = "service-intentions" Name = "redis" JWT = { Providers = [ { Name = "okta" }, ] } Sources = [ { Name = "*" Permissions = [{ Action = "allow" HTTP = { PathPrefix = "/workspace" } JWT = { Providers = [ { Name = "okta" VerifyClaims = [ { Path = ["aud"] Value = "my_client_app" }, { Path = ["sub"] Value = "5be86359073c434bad2da3932222dabe" } ] }, ] } }, { Action = "allow" HTTP = { PathPrefix = "/" } JWT = { Providers = [ { Name = "auth0" }, ] } }] } ] ``` - generate 3 jwt tokens: 1 from auth0 jwks, 1 from okta jwks with different claims than `/workspace` expects and 1 with correct claims - connect to your envoy (change service and address as needed) to view logs and potential errors. You can add: `-- --log-level debug` to see what data is being forwarded ``` consul connect envoy -sidecar-for redis1 -grpc-addr 127.0.0.1:8502 ``` - Make the following requests: ``` curl -s -H "Authorization: Bearer $Auth0_TOKEN" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_wrong_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v RBAC filter denied curl -s -H "Authorization: Bearer $Okta_TOKEN_with_correct_claims" --insecure --cert leaf.cert --key leaf.key --cacert connect-ca.pem https://localhost:20000/workspace -v Successful request ``` ### TODO * [x] Update test coverage * [ ] update integration tests (follow-up PR) * [x] appropriate backport labels added * Support Consul Connect Envoy Command on Windows (#17694) ### Description Add support for consul connect envoy command on windows. This PR fixes the comments of PR - https://github.com/hashicorp/consul/pull/15114 ### Testing * Built consul.exe from this branch on windows and hosted here - [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Updated the [tutorial](https://developer.hashicorp.com/consul/tutorials/developer-mesh/consul-windows-workloads) and changed the `consul_url.default` value to [AWS S3](https://asheshvidyut-bucket.s3.ap-southeast-2.amazonaws.com/consul.zip) * Followed the steps in the tutorial and verified that everything is working as described. 
### PR Checklist * [x] updated test coverage * [ ] external facing docs updated * [x] appropriate backport labels added * [x] not a security concern --------- Co-authored-by: Franco Bruno Lavayen <cocolavayen@gmail.com> Co-authored-by: Jose Ignacio Lorenzo <74208929+joselo85@users.noreply.github.com> Co-authored-by: Jose Ignacio Lorenzo <joseignaciolorenzo85@gmail.com> Co-authored-by: Dhia Ayachi <dhia@hashicorp.com> * Change docs to say 168h instead of 7d for server_rejoin_age_max (#18154) ### Description Addresses https://github.com/hashicorp/consul/pull/17171#issuecomment-1636930705 * [OSS] test: improve xDS listener code coverage (#18138) test: improve xDS listener code coverage * Re-order expected/actual for assertContainerState in consul container tests (#18157) Re-order expected/actual, consul container tests * group and document make file (#17943) * group and document make file * Add `testing/deployer` (neé `consul-topology`) [NET-4610] (#17823) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> * [NET-4792] Add integrations tests for jwt-auth (#18169) * Add FIPS reference to consul enterprise docs (#18028) * Add FIPS reference to consul enterprise docs * Update website/content/docs/enterprise/index.mdx Co-authored-by: David Yu <dyu@hashicorp.com> * remove support for ecs client (fips) --------- Co-authored-by: David Yu <dyu@hashicorp.com> * add peering_commontopo tests [NET-3700] (#17951) Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com> Co-authored-by: R.B. Boyer <rb@hashicorp.com> Co-authored-by: Freddy <freddygv@users.noreply.github.com> Co-authored-by: NiniOak <anita.akaeze@hashicorp.com> * docs - remove Sentinel from enterprise features list (#18176) * Update index.mdx * Update kv.mdx * Update docs-nav-data.json * delete sentinel.mdx * Update redirects.js --------- Co-authored-by: Tu Nguyen <im2nguyen@users.noreply.github.com> * [NET-4865] Bump golang.org/x/net to 0.12.0 (#18186) Bump golang.org/x/net to 0.12.0 While not necessary to directly address CVE-2023-29406 (which should be handled by using a patched version of Go when building), an accompanying change to HTTP/2 error handling does impact agent code. See https://go-review.googlesource.com/c/net/+/506995 for the HTTP/2 change. Bump this dependency across our submodules as well for the sake of potential indirect consumers of `x/net/http`. * Call resource mutate hook before validate hook (NET-4907) (#18178) * [NET-4865] security: Update Go version to 1.20.6 (#18190) Update Go version to 1.20.6 This resolves [CVE-2023-29406] (https://nvd.nist.gov/vuln/detail/CVE-2023-29406) for uses of the `net/http` standard library. Note that until the follow-up to #18124 is done, the version of Go used in those impacted tests will need to remain on 1.20.5. * Improve XDS test coverage: JWT auth edition (#18183) * Improve XDS test coverage: JWT auth edition more tests * test: xds coverage for jwt listeners --------- Co-authored-by: DanStough <dan.stough@hashicorp.com> * update readme.md (#18191) u[date readme.md * Update submodules to latest following 1.16.0 (#18197) Align all our internal use of submodules on the latest versions. * SEC-090: Automated trusted workflow pinning (2023-07-18) (#18174) Result of tsccr-helper -log-level=info -pin-all-workflows . 
if builtinPolicy, ok := structs.ACLBuiltinPolicies[policy.ID]; ok {
return fmt.Errorf("Delete operation not permitted on the builtin %s policy", builtinPolicy.Name)
}
req := structs.ACLPolicyBatchDeleteRequest{
PolicyIDs: []string{args.PolicyID},
}
_, err = a.srv.raftApply(structs.ACLPolicyDeleteRequestType, &req)
if err != nil {
return fmt.Errorf("Failed to apply policy delete request: %v", err)
}
a.srv.ACLResolver.cache.RemovePolicy(policy.ID)
*reply = policy.Name
return nil
}
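
// PolicyList returns a stub (policy metadata without the raw rules) for every
// ACL policy in the requested scope that the caller is authorized to see.
// Listing policies requires acl:read.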
func (a *ACL) PolicyList(args *structs.ACLPolicyListRequest, reply *structs.ACLPolicyListResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.PolicyList", args, reply); done {
return err
}
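// Resolve the request token to an authorizer and require acl:read before
// exposing any policy metadata.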
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
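// Serve the results from a blocking query so callers can long-poll and be
// woken when the policy table changes.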
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, policies, err := state.ACLPolicyList(ws, &args.EnterpriseMeta)
if err != nil {
return err
}
// filter down to just what the requester has permissions to see
a.srv.filterACLWithAuthorizer(authz, &policies)
var stubs structs.ACLPolicyListStubs
for _, policy := range policies {
stubs = append(stubs, policy.Stub())
}
reply.Index, reply.Policies = index, stubs
return nil
})
}
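
// An illustrative caller-side sketch (not part of this endpoint, and assuming
// a client handle whose RPC method takes ctx, method, args, and reply):
//
//	var out structs.ACLPolicyListResponse
//	args := structs.ACLPolicyListRequest{
//		Datacenter:   "dc1",
//		QueryOptions: structs.QueryOptions{Token: "<acl token>"},
//	}
//	if err := client.RPC(context.Background(), "ACL.PolicyList", &args, &out); err != nil {
//		// handle the error
//	}
//	for _, stub := range out.Policies {
//		fmt.Println(stub.ID, stub.Name)
//	}
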
// PolicyResolve is used to retrieve a subset of the policies associated with a given token.
// The policy IDs in the args act purely as a filter on the policy set assigned to the token;
// requesting an ID that is not linked to the token yields a permission denied error.
func (a *ACL) PolicyResolve(args *structs.ACLPolicyBatchGetRequest, reply *structs.ACLPolicyBatchResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.PolicyResolve", args, reply); done {
return err
}
// get full list of policies for this token
identity, policies, err := a.srv.ACLResolver.resolveTokenToIdentityAndPolicies(args.Token)
if err != nil {
return err
}
entIdentity, entPolicies, err := a.srv.ACLResolver.resolveEnterpriseIdentityAndPolicies(identity)
if err != nil {
return err
}
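// Build a set keyed by every policy ID linked to the token (including any
// enterprise identity). Entries are seeded with nil and overlaid below with
// the policies that actually resolved, so IDs whose policies have been
// deleted remain nil.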
idMap := make(map[string]*structs.ACLPolicy)
for _, policyID := range identity.PolicyIDs() {
idMap[policyID] = nil
}
if entIdentity != nil {
for _, policyID := range entIdentity.PolicyIDs() {
idMap[policyID] = nil
}
}
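// Overlay the resolved policy objects onto the seeded IDs.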
for _, policy := range policies {
idMap[policy.ID] = policy
}
for _, policy := range entPolicies {
idMap[policy.ID] = policy
}
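// Answer only for the requested IDs; any ID not linked to the token is
// treated as a permission error rather than silently dropped.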
for _, policyID := range args.PolicyIDs {
if policy, ok := idMap[policyID]; ok {
// only add non-deleted policies
if policy != nil {
reply.Policies = append(reply.Policies, policy)
}
} else {
			// Return a permission denied error to indicate that the request
			// included policy IDs not associated with this token.
return acl.ErrPermissionDenied
}
}
a.srv.SetQueryMeta(&reply.QueryMeta, args.Token)
return nil
}
// ReplicationStatus is used to retrieve the current ACL replication status.
func (a *ACL) ReplicationStatus(args *structs.DCSpecificRequest,
reply *structs.ACLReplicationStatus) error {
	// This must be sent to the leader, so we fix up the args since we are
	// reusing a request structure whose other options we don't support here.
args.RequireConsistent = true
args.AllowStale = false
if done, err := a.srv.ForwardRPC("ACL.ReplicationStatus", args, reply); done {
return err
}
// There's no ACL token required here since this doesn't leak any
// sensitive information, and we don't want people to have to use
// management tokens if they are querying this via a health check.
// Poll the latest status.
a.srv.aclReplicationStatusLock.RLock()
*reply = a.srv.aclReplicationStatus
a.srv.aclReplicationStatusLock.RUnlock()
return nil
}
func timePointer(t time.Time) *time.Time {
return &t
}
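// RoleRead returns a single ACL role, looked up by ID or, when no ID is
// given, by name.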
func (a *ACL) RoleRead(args *structs.ACLRoleGetRequest, reply *structs.ACLRoleResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.RoleRead", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var (
index uint64
role *structs.ACLRole
err error
)
if args.RoleID != "" {
index, role, err = state.ACLRoleGetByID(ws, args.RoleID, &args.EnterpriseMeta)
} else {
index, role, err = state.ACLRoleGetByName(ws, args.RoleName, &args.EnterpriseMeta)
}
if err != nil {
return err
}
reply.Index, reply.Role = index, role
if role == nil {
return errNotFound
}
return nil
})
}
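// RoleBatchRead looks up a set of roles by ID, returning only those the
// requesting token is allowed to see.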
func (a *ACL) RoleBatchRead(args *structs.ACLRoleBatchGetRequest, reply *structs.ACLRoleBatchResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.RoleBatchRead", args, reply); done {
return err
}
authz, err := a.srv.ResolveToken(args.Token)
if err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, roles, err := state.ACLRoleBatchGet(ws, args.RoleIDs)
if err != nil {
return err
}
a.srv.filterACLWithAuthorizer(authz, &roles)
reply.Index, reply.Roles = index, roles
return nil
})
}
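// RoleSet creates or updates an ACL role. Writes are forwarded to the
// primary datacenter and applied via Raft after validation.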
func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.Role.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.InPrimaryDatacenter() {
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.RoleSet", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "role", "upsert"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.Role.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
role := &args.Role
state := a.srv.fsm.State()
// Almost all of the checks here are also done in the state store. However,
// we want to prevent the raft operations when we know they are going to fail
// so we still do them here.
// ensure a name is set
if role.Name == "" {
return fmt.Errorf("Invalid Role: no Name is set")
}
if !acl.IsValidRoleName(role.Name) {
return fmt.Errorf("Invalid Role: invalid Name. Only alphanumeric characters, '-' and '_' are allowed")
}
var existing *structs.ACLRole
var err error
if role.ID == "" {
// with no role ID one will be generated
role.ID, err = lib.GenerateUUID(a.srv.checkRoleUUID)
if err != nil {
return err
}
// validate the name is unique
if _, existing, err = state.ACLRoleGetByName(nil, role.Name, &role.EnterpriseMeta); err != nil {
return fmt.Errorf("acl role lookup by name failed: %v", err)
} else if existing != nil {
return fmt.Errorf("Invalid Role: A Role with Name %q already exists", role.Name)
}
} else {
if _, err := uuid.ParseUUID(role.ID); err != nil {
return fmt.Errorf("Role ID invalid UUID")
}
// Verify the role exists
_, existing, err = state.ACLRoleGetByID(nil, role.ID, nil)
if err != nil {
return fmt.Errorf("acl role lookup failed: %v", err)
} else if existing == nil {
return fmt.Errorf("cannot find role %s", role.ID)
}
if existing.Name != role.Name {
if _, nameMatch, err := state.ACLRoleGetByName(nil, role.Name, &role.EnterpriseMeta); err != nil {
return fmt.Errorf("acl role lookup by name failed: %v", err)
} else if nameMatch != nil {
return fmt.Errorf("Invalid Role: A role with name %q already exists", role.Name)
}
}
}
// validate the enterprise specific fields
if err := a.roleUpsertValidateEnterprise(role, existing); err != nil {
return err
}
policyIDs := make(map[string]struct{})
var policies []structs.ACLRolePolicyLink
// Validate all the policy names and convert them to policy IDs
for _, link := range role.Policies {
if link.ID == "" {
_, policy, err := state.ACLPolicyGetByName(nil, link.Name, &role.EnterpriseMeta)
if err != nil {
return fmt.Errorf("Error looking up policy for name %q: %v", link.Name, err)
}
if policy == nil {
return fmt.Errorf("No such ACL policy with name %q", link.Name)
}
link.ID = policy.ID
}
// Do not store the policy name within raft/memdb as the policy could be renamed in the future.
link.Name = ""
// dedup policy links by id
if _, ok := policyIDs[link.ID]; !ok {
policies = append(policies, link)
policyIDs[link.ID] = struct{}{}
}
}
role.Policies = policies
for _, svcid := range role.ServiceIdentities {
if svcid.ServiceName == "" {
return fmt.Errorf("Service identity is missing the service name field on this role")
}
if !acl.IsValidServiceIdentityName(svcid.ServiceName) {
return fmt.Errorf("Service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName)
}
}
role.ServiceIdentities = role.ServiceIdentities.Deduplicate()
for _, nodeid := range role.NodeIdentities {
if nodeid.NodeName == "" {
return fmt.Errorf("Node identity is missing the node name field on this role")
}
if nodeid.Datacenter == "" {
return fmt.Errorf("Node identity is missing the datacenter field on this role")
}
if !acl.IsValidNodeIdentityName(nodeid.NodeName) {
return fmt.Errorf("Node identity has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed")
}
}
role.NodeIdentities = role.NodeIdentities.Deduplicate()
// calculate the hash for this role
role.SetHash(true)
req := &structs.ACLRoleBatchSetRequest{
Roles: structs.ACLRoles{role},
}
_, err = a.srv.raftApply(structs.ACLRoleSetRequestType, req)
if err != nil {
return fmt.Errorf("Failed to apply role upsert request: %v", err)
}
// Remove from the cache to prevent stale cache usage
a.srv.ACLResolver.cache.RemoveRole(role.ID)
if _, role, err := a.srv.fsm.State().ACLRoleGetByID(nil, role.ID, &role.EnterpriseMeta); err == nil && role != nil {
*reply = *role
}
return nil
}
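// RoleDelete removes a single ACL role by ID, returning the deleted role's
// name.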
func (a *ACL) RoleDelete(args *structs.ACLRoleDeleteRequest, reply *string) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.InPrimaryDatacenter() {
args.Datacenter = a.srv.config.PrimaryDatacenter
}
if done, err := a.srv.ForwardRPC("ACL.RoleDelete", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "role", "delete"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
_, role, err := a.srv.fsm.State().ACLRoleGetByID(nil, args.RoleID, &args.EnterpriseMeta)
if err != nil {
return err
}
if role == nil {
if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
return fmt.Errorf("role not found in namespace %s: %w", ns, acl.ErrNotFound)
}
return fmt.Errorf("role does not exist: %w", acl.ErrNotFound)
}
req := structs.ACLRoleBatchDeleteRequest{
RoleIDs: []string{args.RoleID},
}
_, err = a.srv.raftApply(structs.ACLRoleDeleteRequestType, &req)
if err != nil {
return fmt.Errorf("Failed to apply role delete request: %v", err)
}
a.srv.ACLResolver.cache.RemoveRole(role.ID)
*reply = role.Name
return nil
}
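// RoleList returns the ACL roles visible to the requesting token, optionally
// filtered to those linked to a specific policy.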
func (a *ACL) RoleList(args *structs.ACLRoleListRequest, reply *structs.ACLRoleListResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.RoleList", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, roles, err := state.ACLRoleList(ws, args.Policy, &args.EnterpriseMeta)
if err != nil {
return err
}
a.srv.filterACLWithAuthorizer(authz, &roles)
reply.Index, reply.Roles = index, roles
return nil
})
}
// RoleResolve is used to retrieve a subset of the roles associated with a given token.
// The role IDs in the args simply act as a filter on the role set assigned to the token.
func (a *ACL) RoleResolve(args *structs.ACLRoleBatchGetRequest, reply *structs.ACLRoleBatchResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.RoleResolve", args, reply); done {
return err
}
// get full list of roles for this token
identity, roles, err := a.srv.ACLResolver.resolveTokenToIdentityAndRoles(args.Token)
if err != nil {
return err
}
entIdentity, entRoles, err := a.srv.ACLResolver.resolveEnterpriseIdentityAndRoles(identity)
if err != nil {
return err
}
idMap := make(map[string]*structs.ACLRole)
for _, roleID := range identity.RoleIDs() {
idMap[roleID] = nil
}
if entIdentity != nil {
for _, roleID := range entIdentity.RoleIDs() {
idMap[roleID] = nil
}
}
for _, role := range roles {
idMap[role.ID] = role
}
for _, role := range entRoles {
idMap[role.ID] = role
}
for _, roleID := range args.RoleIDs {
if role, ok := idMap[roleID]; ok {
// only add non-deleted roles
if role != nil {
reply.Roles = append(reply.Roles, role)
}
} else {
			// Return a permission denied error to indicate that the request
			// included role IDs not associated with this token.
return acl.ErrPermissionDenied
}
}
a.srv.SetQueryMeta(&reply.QueryMeta, args.Token)
return nil
}
var errAuthMethodsRequireTokenReplication = errors.New("Token replication is required for auth methods to function")
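// BindingRuleRead returns a single auth method binding rule looked up by ID.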
func (a *ACL) BindingRuleRead(args *structs.ACLBindingRuleGetRequest, reply *structs.ACLBindingRuleResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.BindingRuleRead", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, rule, err := state.ACLBindingRuleGetByID(ws, args.BindingRuleID, &args.EnterpriseMeta)
if err != nil {
return err
}
reply.Index, reply.BindingRule = index, rule
if rule == nil {
return errNotFound
}
return nil
})
}
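// BindingRuleSet creates or updates an auth method binding rule, validating
// the referenced auth method, selector, bind type, and bind name before the
// Raft apply.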
func (a *ACL) BindingRuleSet(args *structs.ACLBindingRuleSetRequest, reply *structs.ACLBindingRule) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.BindingRule.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.BindingRuleSet", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "bindingrule", "upsert"}, time.Now())
var authzContext acl.AuthorizerContext
// Verify token is permitted to modify ACLs
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.BindingRule.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
var existing *structs.ACLBindingRule
rule := &args.BindingRule
state := a.srv.fsm.State()
if rule.ID == "" {
// with no binding rule ID one will be generated
var err error
rule.ID, err = lib.GenerateUUID(a.srv.checkBindingRuleUUID)
if err != nil {
return err
}
} else {
if _, err := uuid.ParseUUID(rule.ID); err != nil {
return fmt.Errorf("Binding Rule ID invalid UUID")
}
		// Verify the binding rule exists
var err error
// specifically disregarding the enterprise meta here
_, existing, err = state.ACLBindingRuleGetByID(nil, rule.ID, nil)
if err != nil {
return fmt.Errorf("acl binding rule lookup failed: %v", err)
} else if existing == nil {
return fmt.Errorf("cannot find binding rule %s", rule.ID)
}
if rule.AuthMethod == "" {
rule.AuthMethod = existing.AuthMethod
} else if existing.AuthMethod != rule.AuthMethod {
return fmt.Errorf("the AuthMethod field of an Binding Rule is immutable")
}
}
if rule.AuthMethod == "" {
return fmt.Errorf("Invalid Binding Rule: no AuthMethod is set")
}
// this is done early here to produce better errors
if err := state.ACLBindingRuleUpsertValidateEnterprise(rule, existing); err != nil {
return err
}
methodIdx, method, err := state.ACLAuthMethodGetByName(nil, rule.AuthMethod, &args.BindingRule.EnterpriseMeta)
if err != nil {
return fmt.Errorf("acl auth method lookup failed: %v", err)
} else if method == nil {
return fmt.Errorf("cannot find auth method with name %q", rule.AuthMethod)
}
validator, err := a.srv.loadAuthMethodValidator(methodIdx, method)
if err != nil {
return err
}
// Create a blank placeholder identity for use in validation below.
blankID := validator.NewIdentity()
if rule.Selector != "" {
if _, err := bexpr.CreateEvaluatorForType(rule.Selector, nil, blankID.SelectableFields); err != nil {
return fmt.Errorf("invalid Binding Rule: Selector is invalid: %v", err)
}
}
if rule.BindType == "" {
return fmt.Errorf("Invalid Binding Rule: no BindType is set")
}
if rule.BindName == "" {
return fmt.Errorf("Invalid Binding Rule: no BindName is set")
}
switch rule.BindType {
case structs.BindingRuleBindTypeService:
case structs.BindingRuleBindTypeNode:
case structs.BindingRuleBindTypeRole:
default:
return fmt.Errorf("Invalid Binding Rule: unknown BindType %q", rule.BindType)
}
if valid, err := auth.IsValidBindName(rule.BindType, rule.BindName, blankID.ProjectedVarNames()); err != nil {
return fmt.Errorf("Invalid Binding Rule: invalid BindName: %v", err)
} else if !valid {
return fmt.Errorf("Invalid Binding Rule: invalid BindName")
}
req := &structs.ACLBindingRuleBatchSetRequest{
BindingRules: structs.ACLBindingRules{rule},
}
_, err = a.srv.raftApply(structs.ACLBindingRuleSetRequestType, req)
if err != nil {
return fmt.Errorf("Failed to apply binding rule upsert request: %v", err)
}
if _, rule, err := a.srv.fsm.State().ACLBindingRuleGetByID(nil, rule.ID, &rule.EnterpriseMeta); err == nil && rule != nil {
*reply = *rule
}
return nil
}
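// BindingRuleDelete removes a single auth method binding rule by ID.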
func (a *ACL) BindingRuleDelete(args *structs.ACLBindingRuleDeleteRequest, reply *bool) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.BindingRuleDelete", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "bindingrule", "delete"}, time.Now())
var authzContext acl.AuthorizerContext
// Verify token is permitted to modify ACLs
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
_, rule, err := a.srv.fsm.State().ACLBindingRuleGetByID(nil, args.BindingRuleID, &args.EnterpriseMeta)
if err != nil {
return err
}
if rule == nil {
if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
return fmt.Errorf("binding rule not found in namespace %s: %w", ns, acl.ErrNotFound)
}
return fmt.Errorf("binding rule does not exist: %w", acl.ErrNotFound)
}
req := structs.ACLBindingRuleBatchDeleteRequest{
BindingRuleIDs: []string{args.BindingRuleID},
}
_, err = a.srv.raftApply(structs.ACLBindingRuleDeleteRequestType, &req)
if err != nil {
return fmt.Errorf("Failed to apply binding rule delete request: %v", err)
}
*reply = true
return nil
}
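// BindingRuleList returns the binding rules visible to the requesting token,
// optionally filtered by auth method.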
func (a *ACL) BindingRuleList(args *structs.ACLBindingRuleListRequest, reply *structs.ACLBindingRuleListResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.BindingRuleList", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, rules, err := state.ACLBindingRuleList(ws, args.AuthMethod, &args.EnterpriseMeta)
if err != nil {
return err
}
a.srv.filterACLWithAuthorizer(authz, &rules)
reply.Index, reply.BindingRules = index, rules
return nil
})
}
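// AuthMethodRead returns a single auth method looked up by name.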
func (a *ACL) AuthMethodRead(args *structs.ACLAuthMethodGetRequest, reply *structs.ACLAuthMethodResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.AuthMethodRead", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, method, err := state.ACLAuthMethodGetByName(ws, args.AuthMethodName, &args.EnterpriseMeta)
if err != nil {
return err
}
reply.Index, reply.AuthMethod = index, method
if method == nil {
return errNotFound
}
_ = a.srv.enterpriseAuthMethodTypeValidation(method.Type)
return nil
})
}
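// AuthMethodSet creates or updates an auth method, validating its name,
// type, token TTL bounds, token locality, and configuration before the
// Raft apply.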
func (a *ACL) AuthMethodSet(args *structs.ACLAuthMethodSetRequest, reply *structs.ACLAuthMethod) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.AuthMethod.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.AuthMethodSet", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "authmethod", "upsert"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.AuthMethod.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
method := &args.AuthMethod
state := a.srv.fsm.State()
// ensure a name is set
if method.Name == "" {
return fmt.Errorf("Invalid Auth Method: no Name is set")
}
if !acl.IsValidAuthMethodName(method.Name) {
return fmt.Errorf("Invalid Auth Method: invalid Name. Only alphanumeric characters, '-' and '_' are allowed")
}
if err := a.srv.enterpriseAuthMethodTypeValidation(method.Type); err != nil {
return err
}
// Check to see if the method exists first.
_, existing, err := state.ACLAuthMethodGetByName(nil, method.Name, &method.EnterpriseMeta)
if err != nil {
return fmt.Errorf("acl auth method lookup failed: %v", err)
}
if existing != nil {
if method.Type == "" {
method.Type = existing.Type
} else if existing.Type != method.Type {
return fmt.Errorf("the Type field of an Auth Method is immutable")
}
}
if !authmethod.IsRegisteredType(method.Type) {
return fmt.Errorf("Invalid Auth Method: Type should be one of: %v", authmethod.Types())
}
if method.MaxTokenTTL != 0 {
if method.MaxTokenTTL > a.srv.config.ACLTokenMaxExpirationTTL {
return fmt.Errorf("MaxTokenTTL %s cannot be more than %s",
method.MaxTokenTTL, a.srv.config.ACLTokenMaxExpirationTTL)
} else if method.MaxTokenTTL < a.srv.config.ACLTokenMinExpirationTTL {
return fmt.Errorf("MaxTokenTTL %s cannot be less than %s",
method.MaxTokenTTL, a.srv.config.ACLTokenMinExpirationTTL)
}
}
switch method.TokenLocality {
case "local", "":
case "global":
if !a.srv.InPrimaryDatacenter() {
return fmt.Errorf("Invalid Auth Method: TokenLocality 'global' can only be used in the primary datacenter")
}
default:
return fmt.Errorf("Invalid Auth Method: TokenLocality should be one of 'local' or 'global'")
}
// Instantiate a validator but do not cache it yet. This will validate the
// configuration.
validator, err := authmethod.NewValidator(a.srv.logger, method)
if err != nil {
return fmt.Errorf("Invalid Auth Method: %v", err)
}
if err := enterpriseAuthMethodValidation(method, validator); err != nil {
return err
}
if err := a.srv.fsm.State().ACLAuthMethodUpsertValidateEnterprise(method, existing); err != nil {
return err
}
req := &structs.ACLAuthMethodBatchSetRequest{
AuthMethods: structs.ACLAuthMethods{method},
}
_, err = a.srv.raftApply(structs.ACLAuthMethodSetRequestType, req)
if err != nil {
return fmt.Errorf("Failed to apply auth method upsert request: %v", err)
}
if _, method, err := a.srv.fsm.State().ACLAuthMethodGetByName(nil, method.Name, &method.EnterpriseMeta); err == nil && method != nil {
*reply = *method
}
return nil
}
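// AuthMethodDelete removes a single auth method by name.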
func (a *ACL) AuthMethodDelete(args *structs.ACLAuthMethodDeleteRequest, reply *bool) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, true); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.AuthMethodDelete", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "authmethod", "delete"}, time.Now())
// Verify token is permitted to modify ACLs
var authzContext acl.AuthorizerContext
if authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext); err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLWriteAllowed(&authzContext); err != nil {
return err
}
_, method, err := a.srv.fsm.State().ACLAuthMethodGetByName(nil, args.AuthMethodName, &args.EnterpriseMeta)
if err != nil {
return err
}
if method == nil {
if ns := args.EnterpriseMeta.NamespaceOrEmpty(); ns != "" {
return fmt.Errorf("auth method not found in namespace %s: %w", ns, acl.ErrNotFound)
}
return fmt.Errorf("auth method does not exist: %w", acl.ErrNotFound)
}
if err := a.srv.enterpriseAuthMethodTypeValidation(method.Type); err != nil {
return err
}
req := structs.ACLAuthMethodBatchDeleteRequest{
AuthMethodNames: []string{args.AuthMethodName},
EnterpriseMeta: args.EnterpriseMeta,
}
_, err = a.srv.raftApply(structs.ACLAuthMethodDeleteRequestType, &req)
if err != nil {
return fmt.Errorf("Failed to apply auth method delete request: %v", err)
}
*reply = true
return nil
}
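// AuthMethodList returns stubs of the auth methods visible to the requesting
// token.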
func (a *ACL) AuthMethodList(args *structs.ACLAuthMethodListRequest, reply *structs.ACLAuthMethodListResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if err := a.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if done, err := a.srv.ForwardRPC("ACL.AuthMethodList", args, reply); done {
return err
}
var authzContext acl.AuthorizerContext
authz, err := a.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
} else if err := authz.ToAllowAuthorizer().ACLReadAllowed(&authzContext); err != nil {
return err
}
return a.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, methods, err := state.ACLAuthMethodList(ws, &args.EnterpriseMeta)
if err != nil {
return err
}
a.srv.filterACLWithAuthorizer(authz, &methods)
var stubs structs.ACLAuthMethodListStubs
for _, method := range methods {
_ = a.srv.enterpriseAuthMethodTypeValidation(method.Type)
stubs = append(stubs, method.Stub())
}
reply.Index, reply.AuthMethods = index, stubs
return nil
})
}
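// Login exchanges a bearer token, verified by the named auth method, for a
// newly created ACL token.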
func (a *ACL) Login(args *structs.ACLLoginRequest, reply *structs.ACLToken) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if args.Auth == nil {
return fmt.Errorf("Invalid Login request: Missing auth parameters")
}
if err := a.srv.validateEnterpriseRequest(&args.Auth.EnterpriseMeta, true); err != nil {
return err
}
if args.Token != "" { // This shouldn't happen.
return errors.New("do not provide a token when logging in")
}
if done, err := a.srv.ForwardRPC("ACL.Login", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "login"}, time.Now())
authMethod, validator, err := a.srv.loadAuthMethod(args.Auth.AuthMethod, &args.Auth.EnterpriseMeta)
if err != nil {
return err
}
verifiedIdentity, err := validator.ValidateLogin(context.Background(), args.Auth.BearerToken)
if err != nil {
return err
}
description, err := auth.BuildTokenDescription("token created via login", args.Auth.Meta)
if err != nil {
return err
}
token, err := a.srv.aclLogin().TokenForVerifiedIdentity(verifiedIdentity, authMethod, description)
if err == nil {
*reply = *token
}
return err
}
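// Logout deletes the ACL token used to authenticate the request, forwarding
// to the primary datacenter when the token is global.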
func (a *ACL) Logout(args *structs.ACLLogoutRequest, reply *bool) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if !a.srv.LocalTokensEnabled() {
return errAuthMethodsRequireTokenReplication
}
if args.Token == "" {
return fmt.Errorf("no valid token ID provided: %w", acl.ErrNotFound)
}
if done, err := a.srv.ForwardRPC("ACL.Logout", args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"acl", "logout"}, time.Now())
// No need to check expiration time because it's being deleted.
err := a.srv.aclTokenWriter().Delete(args.Token, true)
switch {
case errors.Is(err, auth.ErrCannotWriteGlobalToken):
// Writes to global tokens must be forwarded to the primary DC.
args.Datacenter = a.srv.config.PrimaryDatacenter
return a.srv.forwardDC("ACL.Logout", a.srv.config.PrimaryDatacenter, args, reply)
case err != nil:
return err
}
*reply = true
return nil
}
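// Authorize answers a batch of authorization checks using the authorizer
// resolved from the provided token.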
func (a *ACL) Authorize(args *structs.RemoteACLAuthorizationRequest, reply *[]structs.ACLAuthorizationResponse) error {
if err := a.aclPreCheck(); err != nil {
return err
}
if done, err := a.srv.ForwardRPC("ACL.Authorize", args, reply); done {
return err
}
authz, err := a.srv.ResolveToken(args.Token)
if err != nil {
return err
}
responses, err := structs.CreateACLAuthorizationResponses(authz, args.Requests)
if err != nil {
return err
}
*reply = responses
return nil
}