Merge pull request #14556 from hashicorp/NET-818-server-cert-v2
commit da5900b4ac
@ -0,0 +1,3 @@
```release-note:improvement
peering: adds an internally managed server certificate for automatic TLS between servers in peer clusters.
```
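For orientation, the identity carried by this internally managed certificate appears in the changes below: a SpiffeIDServer URI plus a peering-specific DNS SAN built by connect.PeeringServerSAN. A minimal, self-contained sketch of the expected formats, assuming the trust domain value used by TestAgent_ServerCertificate further down:

```go
package main

import "fmt"

// peeringServerSAN mirrors connect.PeeringServerSAN from this change: the DNS SAN
// attached to server certificates for control-plane peering traffic.
func peeringServerSAN(dc, trustDomain string) string {
	return fmt.Sprintf("server.%s.peering.%s", dc, trustDomain)
}

func main() {
	// Assumed example values; the trust domain matches the agent test below.
	trustDomain := "11111111-2222-3333-4444-555555555555.consul"
	dc := "dc1"

	// URI SAN carried by the server leaf certificate (see connect.SpiffeIDServer).
	fmt.Printf("spiffe://%s/agent/server/dc/%s\n", trustDomain, dc)

	// DNS SAN that peer clusters use to verify this cluster's servers.
	fmt.Println(peeringServerSAN(dc, trustDomain))
}
```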
@ -37,6 +37,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/checks"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/agent/consul"
|
||||
"github.com/hashicorp/consul/agent/consul/servercert"
|
||||
"github.com/hashicorp/consul/agent/dns"
|
||||
external "github.com/hashicorp/consul/agent/grpc-external"
|
||||
"github.com/hashicorp/consul/agent/local"
|
||||
|
@ -353,6 +354,9 @@ type Agent struct {
|
|||
// based on the current consul configuration.
|
||||
tlsConfigurator *tlsutil.Configurator
|
||||
|
||||
// certManager manages the lifecycle of the internally-managed server certificate.
|
||||
certManager *servercert.CertManager
|
||||
|
||||
// httpConnLimiter is used to limit connections to the HTTP server by client
|
||||
// IP.
|
||||
httpConnLimiter connlimit.Limiter
|
||||
|
@ -583,6 +587,24 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
return fmt.Errorf("Failed to start Consul server: %v", err)
|
||||
}
|
||||
a.delegate = server
|
||||
|
||||
if a.config.PeeringEnabled && a.config.ConnectEnabled {
|
||||
d := servercert.Deps{
|
||||
Logger: a.logger.Named("server.cert-manager"),
|
||||
Config: servercert.Config{
|
||||
Datacenter: a.config.Datacenter,
|
||||
ACLsEnabled: a.config.ACLsEnabled,
|
||||
},
|
||||
Cache: a.cache,
|
||||
GetStore: func() servercert.Store { return server.FSM().State() },
|
||||
TLSConfigurator: a.tlsConfigurator,
|
||||
}
|
||||
a.certManager = servercert.NewCertManager(d)
|
||||
if err := a.certManager.Start(&lib.StopChannelContext{StopCh: a.shutdownCh}); err != nil {
|
||||
return fmt.Errorf("failed to start server cert manager: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
client, err := consul.NewClient(consulCfg, a.baseDeps.Deps)
|
||||
if err != nil {
|
||||
|
|
|
@ -1677,7 +1677,6 @@ func TestAgent_Reload(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
dc1 := "dc1"
|
||||
a := NewTestAgent(t, `
|
||||
services = [
|
||||
|
@ -5499,7 +5498,6 @@ func TestAgent_DeregisterService_ACLDeny(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, TestACLConfig())
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
|
@ -5869,7 +5867,6 @@ func TestAgent_Monitor(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
@ -6519,9 +6516,9 @@ func TestAgentConnectCARoots_list(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, "")
|
||||
// Disable peering to avoid setting up a roots watch for the server certificate,
|
||||
// which leads to a cache hit on the first query below.
|
||||
a := NewTestAgent(t, "peering { enabled = false }")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
||||
|
@ -6751,8 +6748,6 @@ func TestAgentConnectCALeafCert_good(t *testing.T) {
|
|||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
a := StartTestAgent(t, TestAgent{Overrides: `
|
||||
connect {
|
||||
test_ca_leaf_root_change_spread = "1ns"
|
||||
|
|
|
@ -5984,6 +5984,71 @@ func TestAgent_startListeners(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestAgent_ServerCertificate(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
const expectURI = "spiffe://11111111-2222-3333-4444-555555555555.consul/agent/server/dc/dc1"
|
||||
|
||||
// Leader should acquire a server cert after bootstrapping.
|
||||
a1 := NewTestAgent(t, `
|
||||
node_name = "a1"
|
||||
acl {
|
||||
enabled = true
|
||||
tokens {
|
||||
initial_management = "root"
|
||||
default = "root"
|
||||
}
|
||||
}
|
||||
connect {
|
||||
enabled = true
|
||||
}
|
||||
peering {
|
||||
enabled = true
|
||||
}`)
|
||||
defer a1.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a1.RPC, "dc1")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
cert := a1.tlsConfigurator.AutoEncryptCert()
|
||||
require.NotNil(r, cert)
|
||||
require.Len(r, cert.URIs, 1)
|
||||
require.Equal(r, expectURI, cert.URIs[0].String())
|
||||
})
|
||||
|
||||
// Join a follower, and it should be able to acquire a server cert as well.
|
||||
a2 := NewTestAgent(t, `
|
||||
node_name = "a2"
|
||||
bootstrap = false
|
||||
acl {
|
||||
enabled = true
|
||||
tokens {
|
||||
initial_management = "root"
|
||||
default = "root"
|
||||
}
|
||||
}
|
||||
connect {
|
||||
enabled = true
|
||||
}
|
||||
peering {
|
||||
enabled = true
|
||||
}`)
|
||||
defer a2.Shutdown()
|
||||
|
||||
_, err := a2.JoinLAN([]string{fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortLAN)}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
testrpc.WaitForTestAgent(t, a2.RPC, "dc1")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
cert := a2.tlsConfigurator.AutoEncryptCert()
|
||||
require.NotNil(r, cert)
|
||||
require.Len(r, cert.URIs, 1)
|
||||
require.Equal(r, expectURI, cert.URIs[0].String())
|
||||
})
|
||||
}
|
||||
|
||||
func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool {
|
||||
pool := x509.NewCertPool()
|
||||
data, err := ioutil.ReadFile("../test/ca/root.cer")
|
||||
|
|
|
@ -540,7 +540,9 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
|
|||
var id connect.CertURI
|
||||
var dnsNames []string
|
||||
var ipAddresses []net.IP
|
||||
if req.Service != "" {
|
||||
|
||||
switch {
|
||||
case req.Service != "":
|
||||
id = &connect.SpiffeIDService{
|
||||
Host: roots.TrustDomain,
|
||||
Datacenter: req.Datacenter,
|
||||
|
@ -549,7 +551,8 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
|
|||
Service: req.Service,
|
||||
}
|
||||
dnsNames = append(dnsNames, req.DNSSAN...)
|
||||
} else if req.Agent != "" {
|
||||
|
||||
case req.Agent != "":
|
||||
id = &connect.SpiffeIDAgent{
|
||||
Host: roots.TrustDomain,
|
||||
Datacenter: req.Datacenter,
|
||||
|
@ -558,19 +561,30 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
|
|||
}
|
||||
dnsNames = append([]string{"localhost"}, req.DNSSAN...)
|
||||
ipAddresses = append([]net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, req.IPSAN...)
|
||||
} else if req.Kind != "" {
|
||||
if req.Kind != structs.ServiceKindMeshGateway {
|
||||
return result, fmt.Errorf("unsupported kind: %s", req.Kind)
|
||||
}
|
||||
|
||||
case req.Kind == structs.ServiceKindMeshGateway:
|
||||
id = &connect.SpiffeIDMeshGateway{
|
||||
Host: roots.TrustDomain,
|
||||
Datacenter: req.Datacenter,
|
||||
Partition: req.TargetPartition(),
|
||||
}
|
||||
dnsNames = append(dnsNames, req.DNSSAN...)
|
||||
} else {
|
||||
return result, errors.New("URI must be either service, agent, or kind")
|
||||
|
||||
case req.Kind != "":
|
||||
return result, fmt.Errorf("unsupported kind: %s", req.Kind)
|
||||
|
||||
case req.Server:
|
||||
if req.Datacenter == "" {
|
||||
return result, errors.New("datacenter name must be specified")
|
||||
}
|
||||
id = &connect.SpiffeIDServer{
|
||||
Host: roots.TrustDomain,
|
||||
Datacenter: req.Datacenter,
|
||||
}
|
||||
dnsNames = append(dnsNames, connect.PeeringServerSAN(req.Datacenter, roots.TrustDomain))
|
||||
|
||||
default:
|
||||
return result, errors.New("URI must be either service, agent, server, or kind")
|
||||
}
|
||||
|
||||
// Create a new private key
|
||||
|
@ -676,16 +690,19 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
|
|||
type ConnectCALeafRequest struct {
|
||||
Token string
|
||||
Datacenter string
|
||||
Service string // Service name, not ID
|
||||
Agent string // Agent name, not ID
|
||||
Kind structs.ServiceKind // only mesh-gateway for now
|
||||
DNSSAN []string
|
||||
IPSAN []net.IP
|
||||
MinQueryIndex uint64
|
||||
MaxQueryTime time.Duration
|
||||
acl.EnterpriseMeta
|
||||
MustRevalidate bool
|
||||
|
||||
acl.EnterpriseMeta
|
||||
// The following flags indicate the entity we are requesting a cert for.
|
||||
// Only one of these must be specified.
|
||||
Service string // Given a Service name, not ID, the request is for a SpiffeIDService.
|
||||
Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent.
|
||||
Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported.
|
||||
Server bool // If true, the request is for a SpiffeIDServer.
|
||||
}
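Per the field comments above, only one entity flag is set per request; the new server case needs just Server, Datacenter, and (when ACLs are enabled) the server management token. A minimal sketch of such a request and its cache key, with a purely illustrative token value:

```go
package main

import (
	"fmt"

	cachetype "github.com/hashicorp/consul/agent/cache-types"
)

func main() {
	// Illustrative token only; at runtime the cert manager supplies the
	// internally managed server management token.
	req := cachetype.ConnectCALeafRequest{
		Datacenter: "dc1",
		Token:      "1bb0900e-3683-46a5-b04c-4882d7773b83",
		Server:     true,
	}

	// Keys as "server:<hash>", distinct from service/agent/mesh-gateway keys,
	// so server certs get their own cache entry per datacenter.
	fmt.Println(req.Key())
}
```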
|
||||
|
||||
func (r *ConnectCALeafRequest) Key() string {
|
||||
|
@ -711,6 +728,14 @@ func (r *ConnectCALeafRequest) Key() string {
|
|||
}
|
||||
case r.Kind != "":
|
||||
// this is not valid
|
||||
case r.Server:
|
||||
v, err := hashstructure.Hash([]interface{}{
|
||||
"server",
|
||||
r.Datacenter,
|
||||
}, nil)
|
||||
if err == nil {
|
||||
return fmt.Sprintf("server:%d", v)
|
||||
}
|
||||
default:
|
||||
v, err := hashstructure.Hash([]interface{}{
|
||||
r.Service,
|
||||
|
|
|
@ -1164,4 +1164,11 @@ func TestConnectCALeaf_Key(t *testing.T) {
|
|||
})
|
||||
})
|
||||
})
|
||||
t.Run("server", func(t *testing.T) {
|
||||
r1 := key(ConnectCALeafRequest{
|
||||
Server: true,
|
||||
Datacenter: "us-east",
|
||||
})
|
||||
require.True(t, strings.HasPrefix(r1, "server:"), "Key %s does not start with server:", r1)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -2596,6 +2596,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
|
|||
mapCommon("grpc", t.GRPC, &c.GRPC)
|
||||
c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)
|
||||
|
||||
c.ServerMode = rt.ServerMode
|
||||
c.ServerName = rt.ServerName
|
||||
c.NodeName = rt.NodeName
|
||||
c.Domain = rt.DNSDomain
|
||||
|
|
|
@ -66,6 +66,7 @@ func TestLoad_IntegrationWithFlags_OSS(t *testing.T) {
|
|||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
|
|
@ -177,6 +177,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
expected: func(rt *RuntimeConfig) {
|
||||
rt.Bootstrap = true
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
|
@ -194,6 +195,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
expected: func(rt *RuntimeConfig) {
|
||||
rt.BootstrapExpect = 3
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
|
@ -208,6 +210,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
`-data-dir=` + dataDir,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.TLS.ServerMode = false
|
||||
rt.ClientAddrs = []*net.IPAddr{ipAddr("1.2.3.4")}
|
||||
rt.DNSAddrs = []net.Addr{tcpAddr("1.2.3.4:8600"), udpAddr("1.2.3.4:8600")}
|
||||
rt.HTTPAddrs = []net.Addr{tcpAddr("1.2.3.4:8500")}
|
||||
|
@ -319,6 +322,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.SerfBindAddrLAN = tcpAddr("127.0.0.1:8301")
|
||||
rt.SerfBindAddrWAN = tcpAddr("127.0.0.1:8302")
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.TaggedAddresses = map[string]string{
|
||||
"lan": "127.0.0.1",
|
||||
|
@ -659,6 +663,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.DataDir = dataDir
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -841,6 +846,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
|
@ -1881,6 +1887,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.BootstrapExpect = 0
|
||||
rt.LeaveOnTerm = false
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -1898,6 +1905,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.BootstrapExpect = 2
|
||||
rt.LeaveOnTerm = false
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -1918,6 +1926,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.BootstrapExpect = 4
|
||||
rt.LeaveOnTerm = false
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -1937,6 +1946,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
expected: func(rt *RuntimeConfig) {
|
||||
rt.LeaveOnTerm = true
|
||||
rt.ServerMode = false
|
||||
rt.TLS.ServerMode = false
|
||||
rt.SkipLeaveOnInt = false
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
|
@ -3056,6 +3066,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -3087,6 +3098,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -3115,6 +3127,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -3140,6 +3153,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.ConnectEnabled = true
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -3162,6 +3176,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
// rpc.enable_streaming makes no sense in non-server mode
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
rt.ServerMode = false
|
||||
rt.TLS.ServerMode = false
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
|
@ -3185,6 +3200,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.UseStreamingBackend = true
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
},
|
||||
|
@ -3602,6 +3618,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.ConnectMeshGatewayWANFederationEnabled = true
|
||||
// server things
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -5023,6 +5040,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.DataDir = dataDir
|
||||
rt.LeaveOnTerm = false
|
||||
rt.ServerMode = true
|
||||
rt.TLS.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.TLS.InternalRPC.CertFile = "foo"
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
|
@ -6441,6 +6459,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
},
|
||||
NodeName: "otlLxGaI",
|
||||
ServerName: "Oerr9n1G",
|
||||
ServerMode: true,
|
||||
Domain: "7W1xXSqd",
|
||||
EnableAgentTLSForChecks: true,
|
||||
},
|
||||
|
|
|
@ -374,10 +374,10 @@
|
|||
"CipherSuites": [],
|
||||
"KeyFile": "hidden",
|
||||
"TLSMinVersion": "",
|
||||
"UseAutoCert": false,
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
"VerifyServerHostname": false
|
||||
},
|
||||
"HTTPS": {
|
||||
"CAFile": "",
|
||||
|
@ -386,10 +386,10 @@
|
|||
"CipherSuites": [],
|
||||
"KeyFile": "hidden",
|
||||
"TLSMinVersion": "",
|
||||
"UseAutoCert": false,
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
"VerifyServerHostname": false
|
||||
},
|
||||
"InternalRPC": {
|
||||
"CAFile": "",
|
||||
|
@ -398,12 +398,13 @@
|
|||
"CipherSuites": [],
|
||||
"KeyFile": "hidden",
|
||||
"TLSMinVersion": "",
|
||||
"UseAutoCert": false,
|
||||
"VerifyIncoming": false,
|
||||
"VerifyOutgoing": false,
|
||||
"VerifyServerHostname": false,
|
||||
"UseAutoCert": false
|
||||
"VerifyServerHostname": false
|
||||
},
|
||||
"NodeName": "",
|
||||
"ServerMode": false,
|
||||
"ServerName": ""
|
||||
},
|
||||
"TaggedAddresses": {},
|
||||
|
|
|
@ -18,3 +18,9 @@ func (id SpiffeIDServer) URI() *url.URL {
|
|||
result.Path = fmt.Sprintf("/agent/server/dc/%s", id.Datacenter)
|
||||
return &result
|
||||
}
|
||||
|
||||
// PeeringServerSAN returns the DNS SAN to attach to server certificates
|
||||
// for control-plane peering traffic.
|
||||
func PeeringServerSAN(dc, trustDomain string) string {
|
||||
return fmt.Sprintf("server.%s.peering.%s", dc, trustDomain)
|
||||
}
|
||||
|
|
|
@ -157,3 +157,9 @@ func TestSpiffeIDServer_URI(t *testing.T) {
|
|||
|
||||
require.Equal(t, "spiffe://1234.consul/agent/server/dc/dc1", srv.URI().String())
|
||||
}
|
||||
|
||||
func TestServerSAN(t *testing.T) {
|
||||
san := PeeringServerSAN("dc1", TestTrustDomain)
|
||||
expect := "server.dc1.peering." + TestTrustDomain
|
||||
require.Equal(t, expect, san)
|
||||
}
|
||||
|
|
|
@ -132,6 +132,7 @@ type ACLResolverBackend interface {
|
|||
ResolveIdentityFromToken(token string) (bool, structs.ACLIdentity, error)
|
||||
ResolvePolicyFromID(policyID string) (bool, *structs.ACLPolicy, error)
|
||||
ResolveRoleFromID(roleID string) (bool, *structs.ACLRole, error)
|
||||
IsServerManagementToken(token string) bool
|
||||
// TODO: separate methods for each RPC call (there are 4)
|
||||
RPC(method string, args interface{}, reply interface{}) error
|
||||
EnterpriseACLResolverDelegate
|
||||
|
@ -980,6 +981,10 @@ func (r *ACLResolver) resolveLocallyManagedToken(token string) (structs.ACLIdent
|
|||
return structs.NewAgentRecoveryTokenIdentity(r.config.NodeName, token), r.agentRecoveryAuthz, true
|
||||
}
|
||||
|
||||
if r.backend.IsServerManagementToken(token) {
|
||||
return structs.NewACLServerIdentity(token), acl.ManageAll(), true
|
||||
}
|
||||
|
||||
return r.resolveLocallyManagedEnterpriseToken(token)
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,10 @@ type clientACLResolverBackend struct {
|
|||
*Client
|
||||
}
|
||||
|
||||
func (c *clientACLResolverBackend) IsServerManagementToken(_ string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *clientACLResolverBackend) ACLDatacenter() string {
|
||||
// For resolution running on clients, servers within the current datacenter
|
||||
// must be queried first to pick up local tokens.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
|
@ -108,6 +109,19 @@ type serverACLResolverBackend struct {
|
|||
*Server
|
||||
}
|
||||
|
||||
func (s *serverACLResolverBackend) IsServerManagementToken(token string) bool {
|
||||
mgmt, err := s.getSystemMetadata(structs.ServerManagementTokenAccessorID)
|
||||
if err != nil {
|
||||
s.logger.Debug("failed to fetch server management token", "error", err)
|
||||
return false
|
||||
}
|
||||
if mgmt == "" {
|
||||
s.logger.Debug("server management token has not been initialized")
|
||||
return false
|
||||
}
|
||||
return subtle.ConstantTimeCompare([]byte(mgmt), []byte(token)) == 1
|
||||
}
|
||||
|
||||
func (s *serverACLResolverBackend) ACLDatacenter() string {
|
||||
// For resolution running on servers the only option is to contact the
|
||||
// configured ACL Datacenter
|
||||
|
|
|
@ -438,6 +438,8 @@ type ACLResolverTestDelegate struct {
|
|||
// testRoles is used by plainRoleResolveFn if not nil
|
||||
testRoles map[string]*structs.ACLRole
|
||||
|
||||
testServerManagementToken string
|
||||
|
||||
localTokenResolutions int32
|
||||
remoteTokenResolutions int32
|
||||
localPolicyResolutions int32
|
||||
|
@ -456,6 +458,10 @@ type ACLResolverTestDelegate struct {
|
|||
EnterpriseACLResolverTestDelegate
|
||||
}
|
||||
|
||||
func (d *ACLResolverTestDelegate) IsServerManagementToken(token string) bool {
|
||||
return token == d.testServerManagementToken
|
||||
}
|
||||
|
||||
// UseTestLocalData will force delegate-local maps to be used in lieu of the
|
||||
// global factory functions.
|
||||
func (d *ACLResolverTestDelegate) UseTestLocalData(data []interface{}) {
|
||||
|
@ -2187,6 +2193,27 @@ func TestACLResolver_AgentRecovery(t *testing.T) {
|
|||
require.Equal(t, acl.Deny, authz.NodeWrite("bar", nil))
|
||||
}
|
||||
|
||||
func TestACLResolver_ServerManagementToken(t *testing.T) {
|
||||
const testToken = "1bb0900e-3683-46a5-b04c-4882d7773b83"
|
||||
|
||||
d := &ACLResolverTestDelegate{
|
||||
datacenter: "dc1",
|
||||
enabled: true,
|
||||
testServerManagementToken: testToken,
|
||||
}
|
||||
r := newTestACLResolver(t, d, func(cfg *ACLResolverConfig) {
|
||||
cfg.Tokens = &token.Store{}
|
||||
cfg.Config.NodeName = "foo"
|
||||
})
|
||||
|
||||
authz, err := r.ResolveToken(testToken)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, authz.ACLIdentity)
|
||||
require.Equal(t, structs.ServerManagementTokenAccessorID, authz.ACLIdentity.ID())
|
||||
require.NotNil(t, authz.Authorizer)
|
||||
require.Equal(t, acl.ManageAll(), authz.Authorizer)
|
||||
}
|
||||
|
||||
func TestACLResolver_ACLsEnabled(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
|
|
|
@ -501,6 +501,7 @@ func (s *Server) initializeACLs(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Insert the anonymous token if it does not exist.
|
||||
state := s.fsm.State()
|
||||
_, token, err := state.ACLTokenGetBySecret(nil, anonymousToken, nil)
|
||||
if err != nil {
|
||||
|
@ -527,6 +528,20 @@ func (s *Server) initializeACLs(ctx context.Context) error {
|
|||
}
|
||||
s.logger.Info("Created ACL anonymous token from configuration")
|
||||
}
|
||||
|
||||
// Generate or rotate the server management token on leadership transitions.
|
||||
// This token is used by Consul servers for authn/authz when making
|
||||
// requests to themselves through public APIs such as the agent cache.
|
||||
// It is stored as system metadata because it is internally
|
||||
// managed and users are not meant to see it or interact with it.
|
||||
secretID, err := lib.GenerateUUID(nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate the secret ID for the server management token: %w", err)
|
||||
}
|
||||
if err := s.setSystemMetadataKey(structs.ServerManagementTokenAccessorID, secretID); err != nil {
|
||||
return fmt.Errorf("failed to persist server management token: %w", err)
|
||||
}
|
||||
|
||||
// launch the upgrade go routine to generate accessors for everything
|
||||
s.startACLUpgrade(ctx)
|
||||
} else {
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
@ -1295,6 +1296,13 @@ func TestLeader_ACL_Initialization(t *testing.T) {
|
|||
_, policy, err := s1.fsm.State().ACLPolicyGetByID(nil, structs.ACLPolicyGlobalManagementID, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, policy)
|
||||
|
||||
serverToken, err := s1.getSystemMetadata(structs.ServerManagementTokenAccessorID)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, serverToken)
|
||||
|
||||
_, err = uuid.ParseUUID(serverToken)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,267 @@
|
|||
package servercert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib/retry"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
// Correlation ID for leaf cert watches.
|
||||
const leafWatchID = "leaf"
|
||||
|
||||
// Cache is an interface to represent the necessary methods of the agent/cache.Cache.
|
||||
// It is used to request and renew the server leaf certificate.
|
||||
type Cache interface {
|
||||
Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error
|
||||
}
|
||||
|
||||
// TLSConfigurator is an interface to represent the necessary methods of the tlsutil.Configurator.
|
||||
// It is used to apply the server leaf certificate and server name.
|
||||
type TLSConfigurator interface {
|
||||
UpdateAutoTLSCert(pub, priv string) error
|
||||
UpdateAutoTLSPeeringServerName(name string)
|
||||
}
|
||||
|
||||
// Store is an interface to represent the necessary methods of the state.Store.
|
||||
// It is used to fetch the CA config to store the trust domain in the TLSConfigurator.
|
||||
type Store interface {
|
||||
CAConfig(ws memdb.WatchSet) (uint64, *structs.CAConfiguration, error)
|
||||
SystemMetadataGet(ws memdb.WatchSet, key string) (uint64, *structs.SystemMetadataEntry, error)
|
||||
AbandonCh() <-chan struct{}
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
// Datacenter is the datacenter name the server is configured with.
|
||||
Datacenter string
|
||||
|
||||
// ACLsEnabled indicates whether the ACL system is enabled on this server.
|
||||
ACLsEnabled bool
|
||||
}
|
||||
|
||||
type Deps struct {
|
||||
Config Config
|
||||
Logger hclog.Logger
|
||||
Cache Cache
|
||||
GetStore func() Store
|
||||
TLSConfigurator TLSConfigurator
|
||||
waiter retry.Waiter
|
||||
}
|
||||
|
||||
// CertManager is responsible for requesting and renewing the leaf cert for server agents.
|
||||
// The server certificate is managed internally and used for peering control-plane traffic
|
||||
// to the TLS-enabled external gRPC port.
|
||||
type CertManager struct {
|
||||
logger hclog.Logger
|
||||
|
||||
// config contains agent configuration necessary for the cert manager to operate.
|
||||
config Config
|
||||
|
||||
// cache provides an API to issue internal RPC requests and receive notifications
|
||||
// when there are changes.
|
||||
cache Cache
|
||||
|
||||
// cacheUpdateCh receives notifications of cache update events for resources watched.
|
||||
cacheUpdateCh chan cache.UpdateEvent
|
||||
|
||||
// getStore returns the server state store for read-only access.
|
||||
getStore func() Store
|
||||
|
||||
// tlsConfigurator receives the leaf cert and peering server name updates from the cert manager.
|
||||
tlsConfigurator TLSConfigurator
|
||||
|
||||
// waiter contains the waiter for exponential backoff between retries.
|
||||
waiter retry.Waiter
|
||||
}
|
||||
|
||||
func NewCertManager(deps Deps) *CertManager {
|
||||
return &CertManager{
|
||||
config: deps.Config,
|
||||
logger: deps.Logger,
|
||||
cache: deps.Cache,
|
||||
cacheUpdateCh: make(chan cache.UpdateEvent, 1),
|
||||
getStore: deps.GetStore,
|
||||
tlsConfigurator: deps.TLSConfigurator,
|
||||
waiter: retry.Waiter{
|
||||
MinFailures: 1,
|
||||
MinWait: 1 * time.Second,
|
||||
MaxWait: 5 * time.Minute,
|
||||
Jitter: retry.NewJitter(20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *CertManager) Start(ctx context.Context) error {
|
||||
if err := m.initializeWatches(ctx); err != nil {
|
||||
return fmt.Errorf("failed to set up certificate watches: %w", err)
|
||||
}
|
||||
go m.handleUpdates(ctx)
|
||||
|
||||
m.logger.Info("initialized server certificate management")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CertManager) initializeWatches(ctx context.Context) error {
|
||||
if m.config.ACLsEnabled {
|
||||
// If ACLs are enabled we need to watch for server token updates and set/reset
// the leaf cert watch as token updates arrive.
|
||||
go m.watchServerToken(ctx)
|
||||
} else {
|
||||
// If ACLs are disabled we set up a single cache notification for leaf certs.
|
||||
if err := m.watchLeafCert(ctx); err != nil {
|
||||
return fmt.Errorf("failed to watch leaf: %w", err)
|
||||
}
|
||||
}
|
||||
go m.watchCAConfig(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CertManager) watchServerToken(ctx context.Context) {
|
||||
// We keep the last iteration's cancel function to reset watches.
|
||||
var (
|
||||
notifyCtx context.Context
|
||||
cancel context.CancelFunc = func() {}
|
||||
)
|
||||
retryLoopBackoff(ctx, m.waiter, func() error {
|
||||
ws := memdb.NewWatchSet()
|
||||
ws.Add(m.getStore().AbandonCh())
|
||||
|
||||
_, token, err := m.getStore().SystemMetadataGet(ws, structs.ServerManagementTokenAccessorID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if token == nil {
|
||||
m.logger.Debug("ACLs have not finished initializing")
|
||||
return nil
|
||||
}
|
||||
if token.Value == "" {
|
||||
// This should never happen. If the leader stored a token with this key it will not be empty.
|
||||
return fmt.Errorf("empty token")
|
||||
}
|
||||
m.logger.Debug("server management token watch fired - resetting leaf cert watch")
|
||||
|
||||
// Cancel the existing leaf cert watch and spin up a new one any time the server token changes.
|
||||
// The watch needs the current token as set by the leader since certificate signing requests go to the leader.
|
||||
|
||||
cancel()
|
||||
notifyCtx, cancel = context.WithCancel(ctx)
|
||||
|
||||
req := cachetype.ConnectCALeafRequest{
|
||||
Datacenter: m.config.Datacenter,
|
||||
Token: token.Value,
|
||||
Server: true,
|
||||
}
|
||||
if err := m.cache.Notify(notifyCtx, cachetype.ConnectCALeafName, &req, leafWatchID, m.cacheUpdateCh); err != nil {
|
||||
return fmt.Errorf("failed to setup leaf cert notifications: %w", err)
|
||||
}
|
||||
|
||||
ws.WatchCtx(ctx)
|
||||
return nil
|
||||
|
||||
}, func(err error) {
|
||||
m.logger.Error("failed to watch server management token", "error", err)
|
||||
})
|
||||
}
|
||||
|
||||
func (m *CertManager) watchLeafCert(ctx context.Context) error {
|
||||
req := cachetype.ConnectCALeafRequest{
|
||||
Datacenter: m.config.Datacenter,
|
||||
Server: true,
|
||||
}
|
||||
if err := m.cache.Notify(ctx, cachetype.ConnectCALeafName, &req, leafWatchID, m.cacheUpdateCh); err != nil {
|
||||
return fmt.Errorf("failed to setup leaf cert notifications: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CertManager) watchCAConfig(ctx context.Context) {
|
||||
retryLoopBackoff(ctx, m.waiter, func() error {
|
||||
ws := memdb.NewWatchSet()
|
||||
ws.Add(m.getStore().AbandonCh())
|
||||
|
||||
_, conf, err := m.getStore().CAConfig(ws)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch CA configuration from the state getStore: %w", err)
|
||||
}
|
||||
if conf == nil || conf.ClusterID == "" {
|
||||
m.logger.Debug("CA has not finished initializing")
|
||||
return nil
|
||||
}
|
||||
|
||||
id := connect.SpiffeIDSigningForCluster(conf.ClusterID)
|
||||
name := connect.PeeringServerSAN(m.config.Datacenter, id.Host())
|
||||
|
||||
m.logger.Debug("CA config watch fired - updating auto TLS server name", "name", name)
|
||||
m.tlsConfigurator.UpdateAutoTLSPeeringServerName(name)
|
||||
|
||||
ws.WatchCtx(ctx)
|
||||
return nil
|
||||
|
||||
}, func(err error) {
|
||||
m.logger.Error("failed to watch CA config", "error", err)
|
||||
})
|
||||
}
|
||||
|
||||
func retryLoopBackoff(ctx context.Context, waiter retry.Waiter, loopFn func() error, errorFn func(error)) {
|
||||
for {
|
||||
if err := waiter.Wait(ctx); err != nil {
|
||||
// The error will only be non-nil if the context is canceled.
|
||||
return
|
||||
}
|
||||
|
||||
if err := loopFn(); err != nil {
|
||||
errorFn(err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Reset the failure count seen by the waiter if there was no error.
|
||||
waiter.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *CertManager) handleUpdates(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.logger.Debug("context canceled")
|
||||
return
|
||||
|
||||
case event := <-m.cacheUpdateCh:
|
||||
m.logger.Debug("got cache update event", "correlationID", event.CorrelationID, "error", event.Err)
|
||||
|
||||
if err := m.handleLeafUpdate(event); err != nil {
|
||||
m.logger.Error("failed to handle cache update event", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *CertManager) handleLeafUpdate(event cache.UpdateEvent) error {
|
||||
if event.Err != nil {
|
||||
return fmt.Errorf("leaf cert watch returned an error: %w", event.Err)
|
||||
}
|
||||
if event.CorrelationID != leafWatchID {
|
||||
return fmt.Errorf("got unexpected update correlation ID %q while expecting %q", event.CorrelationID, leafWatchID)
|
||||
}
|
||||
|
||||
leaf, ok := event.Result.(*structs.IssuedCert)
|
||||
if !ok {
|
||||
return fmt.Errorf("got invalid type in leaf cert watch response: %T", event.Result)
|
||||
}
|
||||
|
||||
m.logger.Debug("leaf certificate watch fired - updating auto TLS certificate", "uri", leaf.ServerURI)
|
||||
|
||||
if err := m.tlsConfigurator.UpdateAutoTLSCert(leaf.CertPEM, leaf.PrivateKeyPEM); err != nil {
|
||||
return fmt.Errorf("failed to getStore the server leaf cert: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,296 @@
|
|||
package servercert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib/retry"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type fakeStore struct {
|
||||
// conf is the current CA configuration stored in the fakeStore.
|
||||
conf chan *structs.CAConfiguration
|
||||
|
||||
// tokenEntry is the current server token entry stored in the fakeStore.
|
||||
tokenEntry chan *structs.SystemMetadataEntry
|
||||
|
||||
// tokenCanceler will unblock the WatchSet for the token entry.
|
||||
tokenCanceler <-chan struct{}
|
||||
}
|
||||
|
||||
func (s *fakeStore) CAConfig(_ memdb.WatchSet) (uint64, *structs.CAConfiguration, error) {
|
||||
select {
|
||||
case conf := <-s.conf:
|
||||
return 0, conf, nil
|
||||
default:
|
||||
return 0, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fakeStore) setCAConfig() {
|
||||
s.conf <- &structs.CAConfiguration{
|
||||
ClusterID: connect.TestClusterID,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fakeStore) SystemMetadataGet(ws memdb.WatchSet, _ string) (uint64, *structs.SystemMetadataEntry, error) {
|
||||
select {
|
||||
case entry := <-s.tokenEntry:
|
||||
ws.Add(s.tokenCanceler)
|
||||
return 0, entry, nil
|
||||
default:
|
||||
return 0, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fakeStore) setServerToken(token string, canceler <-chan struct{}) {
|
||||
s.tokenCanceler = canceler
|
||||
s.tokenEntry <- &structs.SystemMetadataEntry{
|
||||
Key: structs.ServerManagementTokenAccessorID,
|
||||
Value: token,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fakeStore) AbandonCh() <-chan struct{} {
|
||||
return make(<-chan struct{})
|
||||
}
|
||||
|
||||
type testCert struct {
|
||||
pub string
|
||||
priv string
|
||||
}
|
||||
|
||||
type fakeTLSConfigurator struct {
|
||||
cert testCert
|
||||
peeringServerName string
|
||||
|
||||
// syncCh is used to signal that an update was handled.
|
||||
// It synchronizes readers and writers in different goroutines.
|
||||
syncCh chan struct{}
|
||||
}
|
||||
|
||||
func (u *fakeTLSConfigurator) UpdateAutoTLSCert(pub, priv string) error {
|
||||
u.cert = testCert{
|
||||
pub: pub,
|
||||
priv: priv,
|
||||
}
|
||||
u.syncCh <- struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *fakeTLSConfigurator) UpdateAutoTLSPeeringServerName(name string) {
|
||||
u.peeringServerName = name
|
||||
u.syncCh <- struct{}{}
|
||||
}
|
||||
|
||||
func (u *fakeTLSConfigurator) timeoutIfNotUpdated(t *testing.T) error {
|
||||
t.Helper()
|
||||
|
||||
select {
|
||||
case <-u.syncCh:
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatalf("timed out")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type watchInfo struct {
|
||||
ctx context.Context
|
||||
token string
|
||||
}
|
||||
|
||||
type fakeCache struct {
|
||||
updateCh chan<- cache.UpdateEvent
|
||||
|
||||
// watched is a map of watched correlation IDs to the ACL token of the request.
|
||||
watched map[string]watchInfo
|
||||
|
||||
// syncCh is used to signal that Notify was called.
|
||||
// It synchronizes readers and writers in different goroutines.
|
||||
syncCh chan struct{}
|
||||
}
|
||||
|
||||
func (c *fakeCache) triggerLeafUpdate() {
|
||||
c.updateCh <- cache.UpdateEvent{
|
||||
CorrelationID: leafWatchID,
|
||||
Result: &structs.IssuedCert{
|
||||
CertPEM: "cert-pem",
|
||||
PrivateKeyPEM: "key-pem",
|
||||
ServerURI: "test-uri",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *fakeCache) Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error {
|
||||
c.watched[correlationID] = watchInfo{ctx: ctx, token: r.CacheInfo().Token}
|
||||
c.updateCh = ch
|
||||
c.syncCh <- struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeCache) timeoutIfNotUpdated(t *testing.T) error {
|
||||
t.Helper()
|
||||
|
||||
select {
|
||||
case <-c.syncCh:
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatalf("timed out")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testWaiter() retry.Waiter {
|
||||
return retry.Waiter{
|
||||
MinFailures: 1,
|
||||
MinWait: 20 * time.Millisecond,
|
||||
MaxWait: 20 * time.Millisecond,
|
||||
}
|
||||
}
|
||||
|
||||
func TestCertManager_ACLsDisabled(t *testing.T) {
|
||||
tlsConfigurator := fakeTLSConfigurator{syncCh: make(chan struct{}, 1)}
|
||||
cache := fakeCache{watched: make(map[string]watchInfo), syncCh: make(chan struct{}, 1)}
|
||||
store := fakeStore{
|
||||
conf: make(chan *structs.CAConfiguration, 1),
|
||||
tokenEntry: make(chan *structs.SystemMetadataEntry, 1),
|
||||
}
|
||||
|
||||
mgr := NewCertManager(Deps{
|
||||
Logger: testutil.Logger(t),
|
||||
Config: Config{
|
||||
Datacenter: "my-dc",
|
||||
ACLsEnabled: false,
|
||||
},
|
||||
TLSConfigurator: &tlsConfigurator,
|
||||
Cache: &cache,
|
||||
GetStore: func() Store { return &store },
|
||||
})
|
||||
|
||||
// Override the default waiter to reduce time between retries.
|
||||
mgr.waiter = testWaiter()
|
||||
|
||||
require.NoError(t, mgr.Start(context.Background()))
|
||||
|
||||
testutil.RunStep(t, "initial empty state", func(t *testing.T) {
|
||||
require.Empty(t, tlsConfigurator.cert)
|
||||
require.Empty(t, tlsConfigurator.peeringServerName)
|
||||
|
||||
require.Contains(t, cache.watched, leafWatchID)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "leaf cert update", func(t *testing.T) {
|
||||
cache.triggerLeafUpdate()
|
||||
|
||||
// Wait for the update to arrive.
|
||||
require.NoError(t, tlsConfigurator.timeoutIfNotUpdated(t))
|
||||
|
||||
expect := testCert{
|
||||
pub: "cert-pem",
|
||||
priv: "key-pem",
|
||||
}
|
||||
require.Equal(t, expect, tlsConfigurator.cert)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "ca config update", func(t *testing.T) {
|
||||
store.setCAConfig()
|
||||
|
||||
// Wait for the update to arrive.
|
||||
require.NoError(t, tlsConfigurator.timeoutIfNotUpdated(t))
|
||||
|
||||
expect := connect.PeeringServerSAN(mgr.config.Datacenter, connect.TestTrustDomain)
|
||||
require.Equal(t, expect, tlsConfigurator.peeringServerName)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCertManager_ACLsEnabled(t *testing.T) {
|
||||
tlsConfigurator := fakeTLSConfigurator{syncCh: make(chan struct{}, 1)}
|
||||
cache := fakeCache{watched: make(map[string]watchInfo), syncCh: make(chan struct{}, 1)}
|
||||
store := fakeStore{
|
||||
conf: make(chan *structs.CAConfiguration, 1),
|
||||
tokenEntry: make(chan *structs.SystemMetadataEntry, 1),
|
||||
}
|
||||
|
||||
mgr := NewCertManager(Deps{
|
||||
Logger: testutil.Logger(t),
|
||||
Config: Config{
|
||||
Datacenter: "my-dc",
|
||||
ACLsEnabled: true,
|
||||
},
|
||||
TLSConfigurator: &tlsConfigurator,
|
||||
Cache: &cache,
|
||||
GetStore: func() Store { return &store },
|
||||
})
|
||||
|
||||
// Override the default waiter to reduce time between retries.
|
||||
mgr.waiter = testWaiter()
|
||||
|
||||
require.NoError(t, mgr.Start(context.Background()))
|
||||
|
||||
testutil.RunStep(t, "initial empty state", func(t *testing.T) {
|
||||
require.Empty(t, tlsConfigurator.cert)
|
||||
require.Empty(t, tlsConfigurator.peeringServerName)
|
||||
|
||||
require.Empty(t, cache.watched)
|
||||
})
|
||||
|
||||
var leafCtx context.Context
|
||||
tokenCanceler := make(chan struct{})
|
||||
|
||||
testutil.RunStep(t, "server token update", func(t *testing.T) {
|
||||
store.setServerToken("first-secret", tokenCanceler)
|
||||
|
||||
require.NoError(t, cache.timeoutIfNotUpdated(t))
|
||||
|
||||
require.Contains(t, cache.watched, leafWatchID)
|
||||
require.Equal(t, "first-secret", cache.watched[leafWatchID].token)
|
||||
|
||||
leafCtx = cache.watched[leafWatchID].ctx
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "leaf cert update", func(t *testing.T) {
|
||||
cache.triggerLeafUpdate()
|
||||
|
||||
// Wait for the update to arrive.
|
||||
require.NoError(t, tlsConfigurator.timeoutIfNotUpdated(t))
|
||||
|
||||
expect := testCert{
|
||||
pub: "cert-pem",
|
||||
priv: "key-pem",
|
||||
}
|
||||
require.Equal(t, expect, tlsConfigurator.cert)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "another server token update", func(t *testing.T) {
|
||||
store.setServerToken("second-secret", nil)
|
||||
|
||||
// Fire the existing WatchSet to simulate a state store update.
|
||||
tokenCanceler <- struct{}{}
|
||||
|
||||
// The leaf watch in the cache should have been reset.
|
||||
require.NoError(t, cache.timeoutIfNotUpdated(t))
|
||||
|
||||
// The original leaf watch context should have been canceled.
|
||||
require.Error(t, leafCtx.Err())
|
||||
|
||||
// A new leaf watch is expected with the new token.
|
||||
require.Contains(t, cache.watched, leafWatchID)
|
||||
require.Equal(t, "second-secret", cache.watched[leafWatchID].token)
|
||||
})
|
||||
|
||||
testutil.RunStep(t, "ca config update", func(t *testing.T) {
|
||||
store.setCAConfig()
|
||||
|
||||
// Wait for the update to arrive.
|
||||
require.NoError(t, tlsConfigurator.timeoutIfNotUpdated(t))
|
||||
|
||||
expect := connect.PeeringServerSAN(mgr.config.Datacenter, connect.TestTrustDomain)
|
||||
require.Equal(t, expect, tlsConfigurator.peeringServerName)
|
||||
})
|
||||
}
|
|
@ -104,6 +104,7 @@ type ACLIdentity interface {
|
|||
IsLocal() bool
|
||||
EnterpriseMetadata() *acl.EnterpriseMeta
|
||||
}
|
||||
|
||||
type ACLTokenPolicyLink struct {
|
||||
ID string
|
||||
Name string `hash:"ignore"`
|
||||
|
@ -1838,3 +1839,51 @@ func (id *AgentRecoveryTokenIdentity) IsLocal() bool {
|
|||
func (id *AgentRecoveryTokenIdentity) EnterpriseMetadata() *acl.EnterpriseMeta {
|
||||
return nil
|
||||
}
|
||||
|
||||
const ServerManagementTokenAccessorID = "server-management-token"
|
||||
|
||||
type ACLServerIdentity struct {
|
||||
secretID string
|
||||
}
|
||||
|
||||
func NewACLServerIdentity(secretID string) *ACLServerIdentity {
|
||||
return &ACLServerIdentity{
|
||||
secretID: secretID,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) ID() string {
|
||||
return ServerManagementTokenAccessorID
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) SecretToken() string {
|
||||
return i.secretID
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) PolicyIDs() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) RoleIDs() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) ServiceIdentityList() []*ACLServiceIdentity {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) NodeIdentityList() []*ACLNodeIdentity {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) IsExpired(asOf time.Time) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) IsLocal() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (i *ACLServerIdentity) EnterpriseMetadata() *acl.EnterpriseMeta {
|
||||
return acl.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
|
go.mod (18 lines changed)
@ -22,7 +22,7 @@ require (
|
|||
github.com/envoyproxy/go-control-plane v0.10.1
|
||||
github.com/fsnotify/fsnotify v1.5.1
|
||||
github.com/golang/protobuf v1.5.0
|
||||
github.com/google/go-cmp v0.5.7
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/gofuzz v1.2.0
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22
|
||||
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
|
||||
|
@ -72,8 +72,8 @@ require (
|
|||
github.com/prometheus/client_golang v1.4.0
|
||||
github.com/rboyer/safeio v0.2.1
|
||||
github.com/ryanuber/columnize v2.1.2+incompatible
|
||||
github.com/shirou/gopsutil/v3 v3.21.10
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/shirou/gopsutil/v3 v3.22.8
|
||||
github.com/stretchr/testify v1.8.0
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.uber.org/goleak v1.1.10
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
|
||||
|
@ -107,7 +107,6 @@ require (
|
|||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible // indirect
|
||||
github.com/Microsoft/go-winio v0.4.3 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/speakeasy v0.1.0 // indirect
|
||||
github.com/boltdb/bolt v1.3.1 // indirect
|
||||
|
@ -158,6 +157,7 @@ require (
|
|||
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/posener/complete v1.2.3 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.9.1 // indirect
|
||||
|
@ -168,25 +168,25 @@ require (
|
|||
github.com/sirupsen/logrus v1.4.2 // indirect
|
||||
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/objx v0.1.1 // indirect
|
||||
github.com/stretchr/objx v0.4.0 // indirect
|
||||
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.9 // indirect
|
||||
github.com/tklauser/numcpus v0.3.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
|
||||
github.com/vmware/govmomi v0.18.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
go.opencensus.io v0.22.3 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/tools v0.1.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/api v0.28.0 // indirect
|
||||
google.golang.org/appengine v1.6.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/resty.v1 v1.12.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.8 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog v1.0.0 // indirect
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect
|
||||
|
|
go.sum (38 lines changed)
@ -78,8 +78,6 @@ github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64
|
|||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
|
||||
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
|
@ -194,7 +192,6 @@ github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa
|
|||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
|
@ -254,8 +251,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
|
@ -526,6 +524,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
|||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
|
@ -572,8 +572,8 @@ github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06q
|
|||
github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA=
|
||||
github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew=
|
||||
github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y=
|
||||
github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
|
@@ -596,21 +596,24 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI=
github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI=
github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
@@ -623,6 +626,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -788,14 +793,15 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
@@ -864,7 +870,6 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -971,8 +976,9 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -79,6 +79,7 @@ func (w *Waiter) delay() time.Duration {
}

// Reset the failure count to 0.
// Reset must be called if the operation done after Wait did not fail.
func (w *Waiter) Reset() {
	w.failures = 0
}
@@ -88,9 +89,13 @@ func (w *Waiter) Failures() int {
	return int(w.failures)
}

// Wait increase the number of failures by one, and then blocks until the context
// Wait increases the number of failures by one, and then blocks until the context
// is cancelled, or until the wait time is reached.
//
// The wait time increases exponentially as the number of failures increases.
// Every call to Wait increments the failures count, so Reset must be called
// after Wait when there wasn't a failure.
//
// Wait will return ctx.Err() if the context is cancelled.
func (w *Waiter) Wait(ctx context.Context) error {
	w.failures++
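The doc comment above pins down a specific call pattern: Wait after every failure, Reset after the first success. A minimal sketch of that pattern follows (not part of this PR); the MinWait/MaxWait field names are assumed from the surrounding lib/retry package, and doWork is a placeholder operation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/consul/lib/retry"
)

// doWork stands in for any operation that can fail transiently.
func doWork(ctx context.Context) error { return errors.New("transient failure") }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// MinWait/MaxWait are assumed field names; only Wait, Reset, and
	// Failures appear in the hunk above.
	waiter := &retry.Waiter{MinWait: 50 * time.Millisecond, MaxWait: time.Second}
	for {
		if err := doWork(ctx); err == nil {
			// Reset after success so the next failure starts the backoff over.
			waiter.Reset()
			break
		}
		// Wait counts this attempt as a failure and blocks with exponential backoff.
		if err := waiter.Wait(ctx); err != nil {
			fmt.Println("giving up:", err) // ctx.Err() once the context is cancelled
			return
		}
	}
}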
@@ -110,6 +110,10 @@ type ProtocolConfig struct {

// Config configures the Configurator.
type Config struct {
	// ServerMode indicates whether the configurator is attached to a server
	// or client agent.
	ServerMode bool

	// InternalRPC is used to configure the internal multiplexed RPC protocol.
	InternalRPC ProtocolConfig
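For context, a hedged sketch of how the new ServerMode flag would be wired when building a Configurator for a server agent. NewConfigurator's exact signature is assumed here; the Config and ProtocolConfig field names are the ones visible in this diff and its tests, and the file paths are purely illustrative.

package tlsexample

import (
	"github.com/hashicorp/consul/tlsutil"
	"github.com/hashicorp/go-hclog"
)

// newServerConfigurator builds a server-mode Configurator so that the
// auto-TLS certificate is reserved for peering control-plane traffic.
func newServerConfigurator() (*tlsutil.Configurator, error) {
	cfg := tlsutil.Config{
		ServerMode: true,
		InternalRPC: tlsutil.ProtocolConfig{
			CAFile:   "/etc/consul/tls/ca.pem",
			CertFile: "/etc/consul/tls/server.pem",
			KeyFile:  "/etc/consul/tls/server-key.pem",
		},
	}
	return tlsutil.NewConfigurator(cfg, hclog.Default())
}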
@@ -199,13 +203,15 @@ type Configurator struct {
	https       protocolConfig
	internalRPC protocolConfig

	// autoTLS stores configuration that is received from the auto-encrypt or
	// auto-config features.
	// autoTLS stores configuration that is received from:
	// - The auto-encrypt or auto-config features for client agents
	// - The servercert.CertManager for server agents.
	autoTLS struct {
		extraCAPems          []string
		connectCAPems        []string
		cert                 *tls.Certificate
		verifyServerHostname bool
		peeringServerName    string
	}

	// logger is not protected by a lock. It must never be changed after
@@ -372,7 +378,7 @@ func (c *Configurator) UpdateAutoTLSCA(connectCAPems []string) error {
	return nil
}

// UpdateAutoTLSCert receives the updated Auto-Encrypt certificate.
// UpdateAutoTLSCert receives the updated automatically-provisioned certificate.
func (c *Configurator) UpdateAutoTLSCert(pub, priv string) error {
	cert, err := tls.X509KeyPair([]byte(pub), []byte(priv))
	if err != nil {
@@ -388,6 +394,16 @@ func (c *Configurator) UpdateAutoTLSCert(pub, priv string) error {
	return nil
}

// UpdateAutoTLSPeeringServerName receives the updated peering server name for the automatically-provisioned certificate.
func (c *Configurator) UpdateAutoTLSPeeringServerName(name string) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.autoTLS.peeringServerName = name
	atomic.AddUint64(&c.version, 1)
	c.log("UpdateAutoTLSPeeringServerName")
}

// UpdateAutoTLS receives updates from Auto-Config, only expected to be called on
// client agents.
func (c *Configurator) UpdateAutoTLS(manualCAPems, connectCAPems []string, pub, priv string, verifyServerHostname bool) error {
@@ -585,9 +601,12 @@ func (c *Configurator) commonTLSConfig(state protocolConfig, cfg ProtocolConfig,
	// to a server requesting a certificate. Return the autoEncrypt certificate
	// if possible, otherwise default to the manually provisioned one.
	tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert := c.autoTLS.cert
		if cert == nil {
			cert = state.cert
		cert := state.cert

		// In the general case we only prefer to dial out with the autoTLS cert if we are a client.
		// The server's autoTLS cert is exclusively for peering control plane traffic.
		if !c.base.ServerMode && c.autoTLS.cert != nil {
			cert = c.autoTLS.cert
		}

		if cert == nil {
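A standalone sketch of the dial-side selection rule introduced above: client agents prefer the auto-TLS certificate when one has been received, while servers keep presenting the manually provisioned certificate for regular outgoing RPC. Names here are illustrative; only the decision logic is taken from the diff.

package tlsexample

import "crypto/tls"

// clientCertSelector mirrors the GetClientCertificate preference shown in the
// hunk above.
func clientCertSelector(serverMode bool, manual, autoTLS *tls.Certificate) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
	return func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert := manual
		if !serverMode && autoTLS != nil {
			cert = autoTLS // clients dial out with the auto-TLS certificate
		}
		if cert == nil {
			// Presenting an empty certificate lets the handshake proceed
			// without client authentication rather than failing outright.
			return &tls.Certificate{}, nil
		}
		return cert, nil
	}
}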
@@ -754,6 +773,18 @@ func (c *Configurator) IncomingGRPCConfig() *tls.Config {
	config.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) {
		return c.IncomingGRPCConfig(), nil
	}
	config.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
		if c.autoTLS.peeringServerName != "" && info.ServerName == c.autoTLS.peeringServerName {
			// For peering control plane traffic we exclusively use the internally managed certificate.
			// For all other traffic it is only a fallback if no manual certificate is provisioned.
			return c.autoTLS.cert, nil
		}

		if c.grpc.cert != nil {
			return c.grpc.cert, nil
		}
		return c.autoTLS.cert, nil
	}
	return config
}
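The hunk above relies on SNI dispatch inside crypto/tls: GetCertificate inspects the client's ServerName and serves the internally managed peering certificate only when it matches the peering server name; otherwise the manually provisioned certificate wins, with auto-TLS as the final fallback. A minimal standalone sketch of that mechanism (not Consul's code; variable names are illustrative):

package tlsexample

import "crypto/tls"

// incomingGRPCTLS demonstrates SNI-based certificate selection.
func incomingGRPCTLS(manual, autoTLS *tls.Certificate, peeringServerName string) *tls.Config {
	return &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if peeringServerName != "" && hello.ServerName == peeringServerName {
				return autoTLS, nil // peering control-plane traffic only
			}
			if manual != nil {
				return manual, nil // prefer the operator-provisioned certificate
			}
			return autoTLS, nil // fallback when no manual certificate is configured
		},
	}
}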
@@ -225,6 +225,67 @@ func TestConfigurator_IncomingConfig_Common(t *testing.T) {
	}
}

func TestConfigurator_IncomingGRPCConfig_Peering(t *testing.T) {
	// Manually configure Alice's certificates
	cfg := Config{
		GRPC: ProtocolConfig{
			CertFile: "../test/hostname/Alice.crt",
			KeyFile:  "../test/hostname/Alice.key",
		},
	}
	c := makeConfigurator(t, cfg)

	// Set Bob's certificate via auto TLS.
	bobCert := loadFile(t, "../test/hostname/Bob.crt")
	bobKey := loadFile(t, "../test/hostname/Bob.key")
	require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey))

	peeringServerName := "server.dc1.peering.1234"
	c.UpdateAutoTLSPeeringServerName(peeringServerName)

	testutil.RunStep(t, "with peering name", func(t *testing.T) {
		client, errc, _ := startTLSServer(c.IncomingGRPCConfig())
		if client == nil {
			t.Fatalf("startTLSServer err: %v", <-errc)
		}
		tlsClient := tls.Client(client, &tls.Config{
			// When the peering server name is provided the server should present
			// the certificates configured via AutoTLS (Bob).
			ServerName:         peeringServerName,
			InsecureSkipVerify: true,
		})
		require.NoError(t, tlsClient.Handshake())

		certificates := tlsClient.ConnectionState().PeerCertificates
		require.NotEmpty(t, certificates)
		require.Equal(t, "Bob", certificates[0].Subject.CommonName)

		// Check the server side of the handshake succeeded.
		require.NoError(t, <-errc)
	})

	testutil.RunStep(t, "without name", func(t *testing.T) {
		client, errc, _ := startTLSServer(c.IncomingGRPCConfig())
		if client == nil {
			t.Fatalf("startTLSServer err: %v", <-errc)
		}

		tlsClient := tls.Client(client, &tls.Config{
			// ServerName: peeringServerName,
			InsecureSkipVerify: true,
		})
		require.NoError(t, tlsClient.Handshake())

		certificates := tlsClient.ConnectionState().PeerCertificates
		require.NotEmpty(t, certificates)

		// Should default to presenting the manually configured certificates.
		require.Equal(t, "Alice", certificates[0].Subject.CommonName)

		// Check the server side of the handshake succeeded.
		require.NoError(t, <-errc)
	})
}

func TestConfigurator_IncomingInsecureRPCConfig(t *testing.T) {
	// if this test is failing because of expired certificates
	// use the procedure in test/CA-GENERATION.md
@@ -406,6 +467,98 @@ func TestConfigurator_ALPNRPCConfig(t *testing.T) {
	})
}

func TestConfigurator_OutgoingRPC_ServerMode(t *testing.T) {
	type testCase struct {
		clientConfig Config
		expectName   string
	}

	run := func(t *testing.T, tc testCase) {
		serverCfg := makeConfigurator(t, Config{
			InternalRPC: ProtocolConfig{
				CAFile:         "../test/hostname/CertAuth.crt",
				CertFile:       "../test/hostname/Alice.crt",
				KeyFile:        "../test/hostname/Alice.key",
				VerifyIncoming: true,
			},
			ServerMode: true,
		})

		serverConn, errc, certc := startTLSServer(serverCfg.IncomingRPCConfig())
		if serverConn == nil {
			t.Fatalf("startTLSServer err: %v", <-errc)
		}

		clientCfg := makeConfigurator(t, tc.clientConfig)

		bettyCert := loadFile(t, "../test/hostname/Betty.crt")
		bettyKey := loadFile(t, "../test/hostname/Betty.key")
		require.NoError(t, clientCfg.UpdateAutoTLSCert(bettyCert, bettyKey))

		wrap := clientCfg.OutgoingRPCWrapper()
		require.NotNil(t, wrap)

		tlsClient, err := wrap("dc1", serverConn)
		require.NoError(t, err)
		defer tlsClient.Close()

		err = tlsClient.(*tls.Conn).Handshake()
		require.NoError(t, err)

		err = <-errc
		require.NoError(t, err)

		clientCerts := <-certc
		require.NotEmpty(t, clientCerts)

		require.Equal(t, tc.expectName, clientCerts[0].Subject.CommonName)

		// Check the server side of the handshake succeeded.
		require.NoError(t, <-errc)
	}

	tt := map[string]testCase{
		"server with manual cert": {
			clientConfig: Config{
				InternalRPC: ProtocolConfig{
					VerifyOutgoing: true,
					CAFile:         "../test/hostname/CertAuth.crt",
					CertFile:       "../test/hostname/Bob.crt",
					KeyFile:        "../test/hostname/Bob.key",
				},
				ServerMode: true,
			},
			// Even though an AutoTLS cert is configured, the server will prefer the manually configured cert.
			expectName: "Bob",
		},
		"client with manual cert": {
			clientConfig: Config{
				InternalRPC: ProtocolConfig{
					VerifyOutgoing: true,
					CAFile:         "../test/hostname/CertAuth.crt",
					CertFile:       "../test/hostname/Bob.crt",
					KeyFile:        "../test/hostname/Bob.key",
				},
				ServerMode: false,
			},
			expectName: "Betty",
		},
		"client with auto-TLS": {
			clientConfig: Config{
				ServerMode: false,
				AutoTLS:    true,
			},
			expectName: "Betty",
		},
	}

	for name, tc := range tt {
		t.Run(name, func(t *testing.T) {
			run(t, tc)
		})
	}
}

func TestConfigurator_OutgoingInternalRPCWrapper(t *testing.T) {
	// if this test is failing because of expired certificates
	// use the procedure in test/CA-GENERATION.md