Merge pull request #4687 from hashicorp/connect-multidc-config
Connect multi-dc config
commit c22ee028c1
@@ -874,6 +874,7 @@ func (a *Agent) consulConfig() (*consul.Config, error) {
// todo(fs): these are now always set in the runtime config so we can simplify this
// todo(fs): or is there a reason to keep it like that?
base.Datacenter = a.config.Datacenter
base.PrimaryDatacenter = a.config.PrimaryDatacenter
base.DataDir = a.config.DataDir
base.NodeName = a.config.NodeName

@@ -1047,6 +1048,7 @@ func (a *Agent) consulConfig() (*consul.Config, error) {
// Copy the Connect CA bootstrap config
if a.config.ConnectEnabled {
base.ConnectEnabled = true
base.ConnectReplicationToken = a.config.ConnectReplicationToken

// Allow config to specify cluster_id provided it's a valid UUID. This is
// meant only for tests where a deterministic ID makes fixtures much simpler
@@ -1152,6 +1152,9 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in
case "acl_replication_token":
s.agent.tokens.UpdateACLReplicationToken(args.Token)

case "connect_replication_token":
s.agent.tokens.UpdateConnectReplicationToken(args.Token)

default:
resp.WriteHeader(http.StatusNotFound)
fmt.Fprintf(resp, "Token %q is unknown", target)
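As a quick illustration of the new token target, here is a minimal sketch of driving it from a client. It assumes the agent token endpoint keeps its existing shape — `PUT /v1/agent/token/<target>` with a JSON body of `{"Token": "..."}` — which is inferred from the neighboring `acl_replication_token` case rather than stated in this diff; the agent address, helper name, and token value are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// setConnectReplicationToken is a hypothetical helper: it PUTs the new
// "connect_replication_token" target on the agent token endpoint, mirroring
// how acl_replication_token is already updated. The path and body shape are
// assumptions based on the existing endpoint, not guaranteed by this diff.
func setConnectReplicationToken(agentAddr, token string) error {
	body := bytes.NewBufferString(fmt.Sprintf(`{"Token": %q}`, token))
	req, err := http.NewRequest("PUT", agentAddr+"/v1/agent/token/connect_replication_token", body)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Placeholder address and token; a real caller would also set an ACL token header.
	if err := setConnectReplicationToken("http://127.0.0.1:8500", "replication-token-goes-here"); err != nil {
		fmt.Println("error:", err)
	}
}
```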
@@ -558,6 +558,15 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
})
}

primaryDatacenter := strings.ToLower(b.stringVal(c.PrimaryDatacenter))
if c.ACLDatacenter != nil {
b.warn("The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.")

if primaryDatacenter == "" {
primaryDatacenter = strings.ToLower(b.stringVal(c.ACLDatacenter))
}
}

proxyDefaultExecMode := b.stringVal(c.Connect.ProxyDefaults.ExecMode)
proxyDefaultDaemonCommand := c.Connect.ProxyDefaults.DaemonCommand
proxyDefaultScriptCommand := c.Connect.ProxyDefaults.ScriptCommand

@@ -737,6 +746,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
NodeName: b.nodeName(c.NodeName),
NonVotingServer: b.boolVal(c.NonVotingServer),
PidFile: b.stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
RPCAdvertiseAddr: rpcAdvertiseAddr,
RPCBindAddr: rpcBindAddr,
RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout),
@@ -212,6 +212,7 @@ type Config struct {
Performance Performance `json:"performance,omitempty" hcl:"performance" mapstructure:"performance"`
PidFile *string `json:"pid_file,omitempty" hcl:"pid_file" mapstructure:"pid_file"`
Ports Ports `json:"ports,omitempty" hcl:"ports" mapstructure:"ports"`
PrimaryDatacenter *string `json:"primary_datacenter,omitempty" hcl:"primary_datacenter" mapstructure:"primary_datacenter"`
RPCProtocol *int `json:"protocol,omitempty" hcl:"protocol" mapstructure:"protocol"`
RaftProtocol *int `json:"raft_protocol,omitempty" hcl:"raft_protocol" mapstructure:"raft_protocol"`
RaftSnapshotThreshold *int `json:"raft_snapshot_threshold,omitempty" hcl:"raft_snapshot_threshold" mapstructure:"raft_snapshot_threshold"`

@@ -484,11 +485,12 @@ type Upstream struct {
type Connect struct {
// Enabled opts the agent into connect. It should be set on all clients and
// servers in a cluster for correct connect operation.
Enabled *bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"`
Proxy ConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
ProxyDefaults ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"`
CAProvider *string `json:"ca_provider,omitempty" hcl:"ca_provider" mapstructure:"ca_provider"`
CAConfig map[string]interface{} `json:"ca_config,omitempty" hcl:"ca_config" mapstructure:"ca_config"`
Enabled *bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"`
Proxy ConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"`
ProxyDefaults ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"`
CAProvider *string `json:"ca_provider,omitempty" hcl:"ca_provider" mapstructure:"ca_provider"`
CAConfig map[string]interface{} `json:"ca_config,omitempty" hcl:"ca_config" mapstructure:"ca_config"`
ReplicationToken *string `json:"replication_token,omitempty" hcl:"replication_token" mapstructure:"replication_token"`
}

// ConnectProxy is the agent-global connect proxy configuration.
@@ -503,6 +503,9 @@ type RuntimeConfig struct {
// ConnectCAConfig is the config to use for the CA provider.
ConnectCAConfig map[string]interface{}

// ConnectReplicationToken is the ACL token used for replicating intentions.
ConnectReplicationToken string

// ConnectTestDisableManagedProxies is not exposed to public config but is
// used by TestAgent to prevent self-executing the test binary in the
// background if a managed proxy is created for a test. The only place we

@@ -800,6 +803,13 @@ type RuntimeConfig struct {
// hcl: pid_file = string
PidFile string

// PrimaryDatacenter is the central datacenter that holds authoritative
// ACL records, replicates intentions and holds the root CA for Connect.
// This must be the same for the entire cluster. Off by default.
//
// hcl: primary_datacenter = string
PrimaryDatacenter string

// RPCAdvertiseAddr is the TCP address Consul advertises for its RPC endpoint.
// By default this is the bind address on the default RPC Server port. If the
// advertise address is specified then it is used.
@@ -1379,7 +1379,9 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
patch: func(rt *RuntimeConfig) {
rt.ACLDatacenter = "a"
rt.DataDir = dataDir
rt.PrimaryDatacenter = "a"
},
warns: []string{`The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`},
},
{
desc: "acl_replication_token enables acl replication",

@@ -1472,9 +1474,10 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
`-datacenter=a`,
`-data-dir=` + dataDir,
},
json: []string{`{ "acl_datacenter": "%" }`},
hcl: []string{`acl_datacenter = "%"`},
err: `acl_datacenter cannot be "%". Please use only [a-z0-9-_]`,
json: []string{`{ "acl_datacenter": "%" }`},
hcl: []string{`acl_datacenter = "%"`},
err: `acl_datacenter cannot be "%". Please use only [a-z0-9-_]`,
warns: []string{`The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`},
},
{
desc: "autopilot.max_trailing_logs invalid",
@@ -3013,6 +3016,7 @@ func TestFullConfig(t *testing.T) {
"sidecar_max_port": 9999
},
"protocol": 30793,
"primary_datacenter": "ejtmd43d",
"raft_protocol": 19016,
"raft_snapshot_threshold": 16384,
"raft_snapshot_interval": "30s",

@@ -3543,6 +3547,7 @@ func TestFullConfig(t *testing.T) {
sidecar_max_port = 9999
}
protocol = 30793
primary_datacenter = "ejtmd43d"
raft_protocol = 19016
raft_snapshot_threshold = 16384
raft_snapshot_interval = "30s"

@@ -4146,6 +4151,7 @@ func TestFullConfig(t *testing.T) {
NodeName: "otlLxGaI",
NonVotingServer: true,
PidFile: "43xN80Km",
PrimaryDatacenter: "ejtmd43d",
RPCAdvertiseAddr: tcpAddr("17.99.29.16:3757"),
RPCBindAddr: tcpAddr("16.99.34.17:3757"),
RPCHoldTimeout: 15707 * time.Second,

@@ -4488,6 +4494,7 @@ func TestFullConfig(t *testing.T) {
}

warns := []string{
`The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`,
`bootstrap_expect > 0: expecting 53 servers`,
}

@@ -4849,6 +4856,7 @@ func TestSanitize(t *testing.T) {
"ConnectProxyDefaultScriptCommand": [],
"ConnectSidecarMaxPort": 0,
"ConnectSidecarMinPort": 0,
"ConnectReplicationToken": "hidden",
"ConnectTestDisableManagedProxies": false,
"ConsulCoordinateUpdateBatchSize": 0,
"ConsulCoordinateUpdateMaxBatches": 0,

@@ -4931,6 +4939,7 @@ func TestSanitize(t *testing.T) {
"NodeName": "",
"NonVotingServer": false,
"PidFile": "",
"PrimaryDatacenter": "",
"RPCAdvertiseAddr": "",
"RPCBindAddr": "",
"RPCHoldTimeout": "0s",
@@ -75,10 +75,14 @@ type Config struct {
// of nodes.
BootstrapExpect int

// Datacenter is the datacenter this Consul server represents
// Datacenter is the datacenter this Consul server represents.
Datacenter string

// DataDir is the directory to store our state in
// PrimaryDatacenter is the authoritative datacenter for features like ACLs
// and Connect.
PrimaryDatacenter string

// DataDir is the directory to store our state in.
DataDir string

// DevMode is used to enable a development server mode.

@@ -355,6 +359,9 @@ type Config struct {
// CAConfig is used to apply the initial Connect CA configuration when
// bootstrapping.
CAConfig *structs.CAConfiguration

// ConnectReplicationToken is used to control Intention replication.
ConnectReplicationToken string
}

// CheckProtocolVersion validates the protocol version.
@@ -107,7 +107,7 @@ func (s *ConnectCA) ConfigurationSet(
return err
}

newActiveRoot, err := parseCARoot(newRootPEM, args.Config.Provider)
newActiveRoot, err := parseCARoot(newRootPEM, args.Config.Provider, args.Config.ClusterID)
if err != nil {
return err
}

@@ -120,7 +120,10 @@ func (s *ConnectCA) ConfigurationSet(
return err
}

if root != nil && root.ID == newActiveRoot.ID {
// If the root didn't change or if this is a secondary DC, just update the
// config and return.
if (s.srv.config.Datacenter != s.srv.config.PrimaryDatacenter) ||
root != nil && root.ID == newActiveRoot.ID {
args.Op = structs.CAOpSetConfig
resp, err := s.srv.raftApply(structs.ConnectCARequestType, args)
if err != nil {

@@ -276,16 +279,17 @@ func (s *ConnectCA) Roots(
// directly to the structure in the memdb store.

reply.Roots[i] = &structs.CARoot{
ID: r.ID,
Name: r.Name,
SerialNumber: r.SerialNumber,
SigningKeyID: r.SigningKeyID,
NotBefore: r.NotBefore,
NotAfter: r.NotAfter,
RootCert: r.RootCert,
IntermediateCerts: r.IntermediateCerts,
RaftIndex: r.RaftIndex,
Active: r.Active,
ID: r.ID,
Name: r.Name,
SerialNumber: r.SerialNumber,
SigningKeyID: r.SigningKeyID,
ExternalTrustDomain: r.ExternalTrustDomain,
NotBefore: r.NotBefore,
NotAfter: r.NotAfter,
RootCert: r.RootCert,
IntermediateCerts: r.IntermediateCerts,
RaftIndex: r.RaftIndex,
Active: r.Active,
}

if r.Active {
@@ -227,6 +227,8 @@ func (s *Server) establishLeadership() error {
return err
}

s.startEnterpriseLeader()

s.startCARootPruning()

s.setConsistentReadReady()

@@ -245,6 +247,8 @@ func (s *Server) revokeLeadership() error {
return err
}

s.stopEnterpriseLeader()

s.stopCARootPruning()

s.setCAProvider(nil, nil)

@@ -414,24 +418,8 @@ func (s *Server) initializeCAConfig() (*structs.CAConfiguration, error) {
return config, nil
}

// initializeCA sets up the CA provider when gaining leadership, bootstrapping
// the root in the state store if necessary.
func (s *Server) initializeCA() error {
// Bail if connect isn't enabled.
if !s.config.ConnectEnabled {
return nil
}

conf, err := s.initializeCAConfig()
if err != nil {
return err
}

// Initialize the provider based on the current config.
provider, err := s.createCAProvider(conf)
if err != nil {
return err
}
// initializeRootCA runs the initialization logic for a root CA.
func (s *Server) initializeRootCA(provider ca.Provider, conf *structs.CAConfiguration) error {
if err := provider.Configure(conf.ClusterID, true, conf.Config); err != nil {
return fmt.Errorf("error configuring provider: %v", err)
}

@@ -445,7 +433,7 @@ func (s *Server) initializeCA() error {
return fmt.Errorf("error getting root cert: %v", err)
}

rootCA, err := parseCARoot(rootPEM, conf.Provider)
rootCA, err := parseCARoot(rootPEM, conf.Provider, conf.ClusterID)
if err != nil {
return err
}

@@ -495,13 +483,13 @@ func (s *Server) initializeCA() error {

s.setCAProvider(provider, rootCA)

s.logger.Printf("[INFO] connect: initialized CA with provider %q", conf.Provider)
s.logger.Printf("[INFO] connect: initialized primary datacenter CA with provider %q", conf.Provider)

return nil
}

// parseCARoot returns a filled-in structs.CARoot from a raw PEM value.
func parseCARoot(pemValue, provider string) (*structs.CARoot, error) {
func parseCARoot(pemValue, provider, clusterID string) (*structs.CARoot, error) {
id, err := connect.CalculateCertFingerprint(pemValue)
if err != nil {
return nil, fmt.Errorf("error parsing root fingerprint: %v", err)

@@ -511,14 +499,15 @@ func parseCARoot(pemValue, provider string) (*structs.CARoot, error) {
return nil, fmt.Errorf("error parsing root cert: %v", err)
}
return &structs.CARoot{
ID: id,
Name: fmt.Sprintf("%s CA Root Cert", strings.Title(provider)),
SerialNumber: rootCert.SerialNumber.Uint64(),
SigningKeyID: connect.HexString(rootCert.AuthorityKeyId),
NotBefore: rootCert.NotBefore,
NotAfter: rootCert.NotAfter,
RootCert: pemValue,
Active: true,
ID: id,
Name: fmt.Sprintf("%s CA Root Cert", strings.Title(provider)),
SerialNumber: rootCert.SerialNumber.Uint64(),
SigningKeyID: connect.HexString(rootCert.AuthorityKeyId),
ExternalTrustDomain: clusterID,
NotBefore: rootCert.NotBefore,
NotAfter: rootCert.NotAfter,
RootCert: pemValue,
Active: true,
}, nil
}
@@ -0,0 +1,29 @@
// +build !ent

package consul

// initializeCA sets up the CA provider when gaining leadership, bootstrapping
// the root in the state store if necessary.
func (s *Server) initializeCA() error {
// Bail if connect isn't enabled.
if !s.config.ConnectEnabled {
return nil
}

conf, err := s.initializeCAConfig()
if err != nil {
return err
}

// Initialize the provider based on the current config.
provider, err := s.createCAProvider(conf)
if err != nil {
return err
}

return s.initializeRootCA(provider, conf)
}

// Stub methods, only present in Consul Enterprise.
func (s *Server) startEnterpriseLeader() {}
func (s *Server) stopEnterpriseLeader() {}
@@ -274,6 +274,15 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store) (*
config.UseTLS = true
}

// Set the primary DC if it wasn't set.
if config.PrimaryDatacenter == "" {
if config.ACLDatacenter != "" {
config.PrimaryDatacenter = config.ACLDatacenter
} else {
config.PrimaryDatacenter = config.Datacenter
}
}

// Create the TLS wrapper for outgoing connections.
tlsConf := config.tlsConfig()
tlsWrap, err := tlsConf.OutgoingTLSWrapper()
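The defaulting above is a three-way precedence: an explicit `primary_datacenter`, then the deprecated `acl_datacenter`, then the agent's own `datacenter`. A standalone sketch of that rule, with a hypothetical helper name that is not part of this change:

```go
package main

import "fmt"

// resolvePrimaryDatacenter mirrors the server-side defaulting shown above:
// an explicit primary_datacenter wins, then the deprecated acl_datacenter,
// and finally the agent's own datacenter. The helper name is illustrative only.
func resolvePrimaryDatacenter(primary, aclDatacenter, datacenter string) string {
	if primary != "" {
		return primary
	}
	if aclDatacenter != "" {
		return aclDatacenter
	}
	return datacenter
}

func main() {
	fmt.Println(resolvePrimaryDatacenter("", "dc1", "dc2")) // dc1: the deprecated acl_datacenter still wins over the local DC
	fmt.Println(resolvePrimaryDatacenter("", "", "dc2"))    // dc2: falls back to the agent's own datacenter
}
```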
@@ -54,6 +54,9 @@ type CARoot struct {
// private key used to sign the certificate.
SigningKeyID string

// ExternalTrustDomain is the trust domain this root was generated under.
ExternalTrustDomain string

// Time validity bounds.
NotBefore time.Time
NotAfter time.Time
@@ -30,6 +30,10 @@ type Store struct {
// aclReplicationToken is a special token that's used by servers to
// replicate ACLs from the ACL datacenter.
aclReplicationToken string

// connectReplicationToken is a special token that's used by servers to
// replicate intentions from the primary datacenter.
connectReplicationToken string
}

// UpdateUserToken replaces the current user token in the store.

@@ -60,6 +64,13 @@ func (t *Store) UpdateACLReplicationToken(token string) {
t.l.Unlock()
}

// UpdateConnectReplicationToken replaces the current Connect replication token in the store.
func (t *Store) UpdateConnectReplicationToken(token string) {
t.l.Lock()
t.connectReplicationToken = token
t.l.Unlock()
}

// UserToken returns the best token to use for user operations.
func (t *Store) UserToken() string {
t.l.RLock()

@@ -87,6 +98,14 @@ func (t *Store) ACLReplicationToken() string {
return t.aclReplicationToken
}

// ConnectReplicationToken returns the Connect replication token.
func (t *Store) ConnectReplicationToken() string {
t.l.RLock()
defer t.l.RUnlock()

return t.connectReplicationToken
}

// IsAgentMasterToken checks to see if a given token is the agent master token.
// This will never match an empty token for safety.
func (t *Store) IsAgentMasterToken(token string) bool {
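For orientation, a minimal sketch of how the new accessor pair on the token store is used together; it assumes the exported `Store` type lives at the `agent/token` import path this diff touches, and the token value is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/token"
)

func main() {
	// The zero-value Store is usable; servers would normally receive the token
	// via configuration or the agent token endpoint rather than setting it here.
	tokens := new(token.Store)
	tokens.UpdateConnectReplicationToken("replication-token-goes-here")
	fmt.Println(tokens.ConnectReplicationToken())
}
```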
@@ -377,7 +377,7 @@ $ curl \

- `SourceDatacenter` is the authoritative ACL datacenter that ACLs are being
replicated from, and will match the
[`acl_datacenter`](/docs/agent/options.html#acl_datacenter) configuration.
[`primary_datacenter`](/docs/agent/options.html#primary_datacenter) configuration.

- `ReplicatedIndex` is the last index that was successfully replicated. You can
compare this to the `X-Consul-Index` header returned by the
@@ -497,23 +497,23 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass

#### Configuration Key Reference

* <a name="acl_datacenter"></a><a href="#acl_datacenter">`acl_datacenter`</a> - This designates
the datacenter which is authoritative for ACL information. It must be provided to enable ACLs.
All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all
you need for cluster-level enforcement, but for the APIs to forward properly from the clients,
it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement
of ACLs. Please see the [ACL Guide](/docs/guides/acl.html) for more details.
* <a name="acl_datacenter"></a><a href="#acl_datacenter">`acl_datacenter`</a> - **This field is
deprecated in Consul 1.4.0. See the [`primary_datacenter`](#primary_datacenter) field instead.**

This designates the datacenter which is authoritative for ACL information. It must be provided to enable ACLs. All servers and datacenters must agree on the ACL datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients,
it must be set on them too. In Consul 0.8 and later, this also enables agent-level enforcement
of ACLs. Please see the [ACL Guide](/docs/guides/acl.html) for more details.

* <a name="acl_default_policy"></a><a href="#acl_default_policy">`acl_default_policy`</a> - Either
"allow" or "deny"; defaults to "allow". The default policy controls the behavior of a token when
there is no matching rule. In "allow" mode, ACLs are a blacklist: any operation not specifically
prohibited is allowed. In "deny" mode, ACLs are a whitelist: any operation not
specifically allowed is blocked. *Note*: this will not take effect until you've set `acl_datacenter`
specifically allowed is blocked. *Note*: this will not take effect until you've set `primary_datacenter`
to enable ACL support.

* <a name="acl_down_policy"></a><a href="#acl_down_policy">`acl_down_policy`</a> - Either
"allow", "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the case that the
policy for a token cannot be read from the [`acl_datacenter`](#acl_datacenter) or leader
policy for a token cannot be read from the [`primary_datacenter`](#primary_datacenter) or leader
node, the down policy is applied. In "allow" mode, all actions are permitted, "deny" restricts
all operations, and "extend-cache" allows any cached ACLs to be used, ignoring their TTL
values. If a non-cached ACL is used, "extend-cache" acts like "deny".

@@ -546,7 +546,7 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
Please see the [ACL Guide](/docs/guides/acl.html#version_8_acls) for more details.

* <a name="acl_master_token"></a><a href="#acl_master_token">`acl_master_token`</a> - Only used
for servers in the [`acl_datacenter`](#acl_datacenter). This token will be created with management-level
for servers in the [`primary_datacenter`](#primary_datacenter). This token will be created with management-level
permissions if it does not exist. It allows operators to bootstrap the ACL system
with a token ID that is well-known.

@@ -558,7 +558,7 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
the same as the other tokens, but isn't strictly necessary.

* <a name="acl_replication_token"></a><a href="#acl_replication_token">`acl_replication_token`</a> -
Only used for servers outside the [`acl_datacenter`](#acl_datacenter) running Consul 0.7 or later.
Only used for servers outside the [`primary_datacenter`](#primary_datacenter) running Consul 0.7 or later.
When provided, this will enable [ACL replication](/docs/guides/acl.html#replication) using this
token to retrieve and replicate the ACLs to the non-authoritative local datacenter. In Consul 0.9.1
and later you can enable ACL replication using [`enable_acl_replication`](#enable_acl_replication)

@@ -756,6 +756,8 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass

* <a name="connect_proxy_defaults"></a><a href="#connect_proxy_defaults">`proxy_defaults`</a> This object configures the default proxy settings for [service definitions with managed proxies](/docs/agent/services.html). It accepts the fields `exec_mode`, `daemon_command`, and `config`. These are used as default values for the respective fields in the service definition.

* <a name="replication_token"></a><a href="#replication_token">`replication_token`</a> When provided, this will enable Connect replication using this token to retrieve and replicate the Intentions to the non-authoritative local datacenter.

* <a name="datacenter"></a><a href="#datacenter">`datacenter`</a> Equivalent to the
[`-datacenter` command-line flag](#_datacenter).

@@ -1122,6 +1124,12 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
* <a name="protocol"></a><a href="#protocol">`protocol`</a> Equivalent to the
[`-protocol` command-line flag](#_protocol).

* <a name="primary_datacenter"></a><a href="#primary_datacenter">`primary_datacenter`</a> - This
designates the datacenter which is authoritative for ACL information and intentions, and is the root
Certificate Authority for Connect. It must be provided to enable ACLs. All servers and datacenters
must agree on the primary datacenter. Setting it on the servers is all you need for cluster-level enforcement, but for the APIs to forward properly from the clients, it must be set on them too. In
Consul 0.8 and later, this also enables agent-level enforcement of ACLs. Please see the [ACL Guide](/docs/guides/acl.html) for more details.

* <a name="raft_protocol"></a><a href="#raft_protocol">`raft_protocol`</a> Equivalent to the
[`-raft-protocol` command-line flag](#_raft_protocol).
@@ -85,8 +85,8 @@ Constructing rules from these policies is covered in detail in the

#### ACL Datacenter

All nodes (clients and servers) must be configured with an
[`acl_datacenter`](/docs/agent/options.html#acl_datacenter) which enables ACL
All nodes (clients and servers) must be configured with a
[`primary_datacenter`](/docs/agent/options.html#primary_datacenter) which enables ACL
enforcement but also specifies the authoritative datacenter. Consul relies on
[RPC forwarding](/docs/internals/architecture.html) to support multi-datacenter
configurations. However, because requests can be made across datacenter boundaries,

@@ -115,7 +115,7 @@ as to whether they are set on servers, clients, or both.

| Configuration Option | Servers | Clients | Purpose |
| -------------------- | ------- | ------- | ------- |
| [`acl_datacenter`](/docs/agent/options.html#acl_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs |
| [`primary_datacenter`](/docs/agent/options.html#primary_datacenter) | `REQUIRED` | `REQUIRED` | Master control that enables ACLs by defining the authoritative Consul datacenter for ACLs |
| [`acl_default_policy`](/docs/agent/options.html#acl_default_policy) | `OPTIONAL` | `N/A` | Determines whitelist or blacklist mode |
| [`acl_down_policy`](/docs/agent/options.html#acl_down_policy) | `OPTIONAL` | `OPTIONAL` | Determines what to do when the ACL datacenter is offline |
| [`acl_ttl`](/docs/agent/options.html#acl_ttl) | `OPTIONAL` | `OPTIONAL` | Determines time-to-live for cached ACLs |

@@ -200,7 +200,7 @@ Here's the corresponding JSON configuration file:

```json
{
"acl_datacenter": "dc1",
"primary_datacenter": "dc1",
"acl_master_token": "b1gs33cr3t",
"acl_default_policy": "deny",
"acl_down_policy": "extend-cache"

@@ -273,7 +273,7 @@ configuration and restart the servers once more to apply it:

```json
{
"acl_datacenter": "dc1",
"primary_datacenter": "dc1",
"acl_master_token": "b1gs33cr3t",
"acl_default_policy": "deny",
"acl_down_policy": "extend-cache",

@@ -310,7 +310,7 @@ with a configuration file that enables ACLs:

```json
{
"acl_datacenter": "dc1",
"primary_datacenter": "dc1",
"acl_down_policy": "extend-cache",
"acl_agent_token": "fe3b8d40-0ee0-8783-6cc2-ab1aa9bb16c1"
}

@@ -1029,7 +1029,7 @@ name that starts with "admin".
#### Outages and ACL Replication

The Consul ACL system is designed with flexible rules to accommodate for an outage
of the [`acl_datacenter`](/docs/agent/options.html#acl_datacenter) or networking
of the [`primary_datacenter`](/docs/agent/options.html#primary_datacenter) or networking
issues preventing access to it. In this case, it may be impossible for
agents in non-authoritative datacenters to resolve tokens. Consul provides
a number of configurable [`acl_down_policy`](/docs/agent/options.html#acl_down_policy)

@@ -1082,10 +1082,10 @@ using the [ACL replication status](/api/acl.html#acl_replication_status)
endpoint.
2. Turn down the old authoritative datacenter servers.
3. Rolling restart the agents in the target datacenter and change the
`acl_datacenter` servers to itself. This will automatically turn off
`primary_datacenter` servers to itself. This will automatically turn off
replication and will enable the datacenter to start acting as the authoritative
datacenter, using its replicated ACLs from before.
3. Rolling restart the agents in other datacenters and change their `acl_datacenter`
3. Rolling restart the agents in other datacenters and change their `primary_datacenter`
configuration to the target datacenter.

<a name="version_8_acls"></a>

@@ -1133,11 +1133,11 @@ Since clients now resolve ACLs locally, the [`acl_down_policy`](/docs/agent/opti
now applies to Consul clients as well as Consul servers. This will determine what the
client will do in the event that the servers are down.

Consul clients must have [`acl_datacenter`](/docs/agent/options.html#acl_datacenter) configured
Consul clients must have [`primary_datacenter`](/docs/agent/options.html#primary_datacenter) configured
in order to enable agent-level ACL features. If this is set, the agents will contact the Consul
servers to determine if ACLs are enabled at the cluster level. If they detect that ACLs are not
enabled, they will check at most every 2 minutes to see if they have become enabled, and will
start enforcing ACLs automatically. If an agent has an `acl_datacenter` defined, operators will
start enforcing ACLs automatically. If an agent has a `primary_datacenter` defined, operators will
need to use the [`acl_agent_master_token`](/docs/agent/options.html#acl_agent_master_token) to
perform agent-level operations if the Consul servers aren't present (such as for a manual join
to the cluster), unless the [`acl_down_policy`](/docs/agent/options.html#acl_down_policy) on the
@@ -66,7 +66,7 @@ The following can be used to manage network areas:

Network areas can be used alongside Consul's [Basic Federation](/docs/guides/datacenters.html)
model and the WAN gossip pool. This helps ease migration, and clusters like the
[ACL datacenter](/docs/agent/options.html#acl_datacenter) are more easily managed via
[primary datacenter](/docs/agent/options.html#primary_datacenter) are more easily managed via
the WAN because they need to be available to all Consul datacenters.

A peer datacenter can be connected via the WAN gossip pool and a network area at the