Merge pull request #14796 from hashicorp/peering/use-connect-ca
commit 8d93f120ea

@@ -0,0 +1,3 @@
```release-note:improvement
peering: require TLS for peering connections using server cert signed by Connect CA
```
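
In practice this change means a Consul server must have Connect enabled and a TLS-enabled gRPC listener before it can generate peering tokens. A minimal server configuration sketch (it mirrors the `peering_server.hcl` test fixture added later in this diff; the port value is illustrative):

```hcl
# Expose the TLS-enabled gRPC listener that dialing peers connect to.
ports {
  grpc_tls = 8503
}

# Connect must be enabled so the Connect CA can sign the server certificate
# presented during the peering handshake.
connect {
  enabled = true
}
```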

@@ -1443,8 +1443,8 @@ func TestAgent_Self(t *testing.T) {
}
ports = {
grpc = -1
}
`,
grpc_tls = -1
}`,
expectXDS: false,
grpcTLS: false,
},

@@ -1453,7 +1453,9 @@ func TestAgent_Self(t *testing.T) {
node_meta {
somekey = "somevalue"
}
`,
ports = {
grpc_tls = -1
}`,
expectXDS: true,
grpcTLS: false,
},

@@ -1461,8 +1463,7 @@ func TestAgent_Self(t *testing.T) {
hcl: `
node_meta {
somekey = "somevalue"
}
`,
}`,
expectXDS: true,
grpcTLS: true,
},
@@ -183,8 +183,7 @@ func TestCAWithKeyType(t testing.T, xc *structs.CARoot, keyType string, keyBits
return testCA(t, xc, keyType, keyBits, 0)
}

func testLeafWithID(t testing.T, spiffeId CertURI, root *structs.CARoot, keyType string, keyBits int, expiration time.Duration) (string, string, error) {
func testLeafWithID(t testing.T, spiffeId CertURI, dnsSAN string, root *structs.CARoot, keyType string, keyBits int, expiration time.Duration) (string, string, error) {
if expiration == 0 {
// this is 10 years
expiration = 10 * 365 * 24 * time.Hour

@@ -238,6 +237,7 @@ func testLeafWithID(t testing.T, spiffeId CertURI, root *structs.CARoot, keyType
NotBefore: time.Now(),
AuthorityKeyId: testKeyID(t, caSigner.Public()),
SubjectKeyId: testKeyID(t, pkSigner.Public()),
DNSNames: []string{dnsSAN},
}

// Create the certificate, PEM encode it and return that value.

@@ -263,7 +263,7 @@ func TestAgentLeaf(t testing.T, node string, datacenter string, root *structs.CA
Agent: node,
}

return testLeafWithID(t, spiffeId, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, expiration)
return testLeafWithID(t, spiffeId, "", root, DefaultPrivateKeyType, DefaultPrivateKeyBits, expiration)
}

func testLeaf(t testing.T, service string, namespace string, root *structs.CARoot, keyType string, keyBits int) (string, string, error) {

@@ -275,7 +275,7 @@ func testLeaf(t testing.T, service string, namespace string, root *structs.CARoo
Service: service,
}

return testLeafWithID(t, spiffeId, root, keyType, keyBits, 0)
return testLeafWithID(t, spiffeId, "", root, keyType, keyBits, 0)
}

// TestLeaf returns a valid leaf certificate and it's private key for the named

@@ -305,7 +305,23 @@ func TestMeshGatewayLeaf(t testing.T, partition string, root *structs.CARoot) (s
Datacenter: "dc1",
}

certPEM, keyPEM, err := testLeafWithID(t, spiffeId, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
certPEM, keyPEM, err := testLeafWithID(t, spiffeId, "", root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
if err != nil {
t.Fatalf(err.Error())
}
return certPEM, keyPEM
}

func TestServerLeaf(t testing.T, dc string, root *structs.CARoot) (string, string) {
t.Helper()

spiffeID := &SpiffeIDServer{
Datacenter: dc,
Host: fmt.Sprintf("%s.consul", TestClusterID),
}
san := PeeringServerSAN(dc, TestTrustDomain)

certPEM, keyPEM, err := testLeafWithID(t, spiffeID, san, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
if err != nil {
t.Fatalf(err.Error())
}
@@ -22,6 +22,7 @@ import (
"google.golang.org/protobuf/proto"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"

@@ -34,27 +35,23 @@ import (
)

func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
t.Run("without-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, false)
})
t.Run("with-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
})
}

func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}

ca := connect.TestCA(t, nil)
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
if enableTLS {
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")

@@ -94,11 +91,6 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
if enableTLS {
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
}
})
testrpc.WaitForLeader(t, dialer.RPC, "dc2")

@@ -169,13 +161,22 @@ func TestLeader_PeeringSync_Lifecycle_UnexportWhileDown(t *testing.T) {
}

// Reserve a gRPC port so we can restart the accepting server with the same port.
ports := freeport.GetN(t, 1)
dialingServerPort := ports[0]
dialingServerPort := freeport.GetOne(t)

ca := connect.TestCA(t, nil)
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")

@@ -207,10 +208,11 @@ func TestLeader_PeeringSync_Lifecycle_UnexportWhileDown(t *testing.T) {
// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
_, dialer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "dialer"
c.Datacenter = "dc1"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
c.GRPCPort = dialingServerPort
})
testrpc.WaitForLeader(t, dialer.RPC, "dc1")
testrpc.WaitForLeader(t, dialer.RPC, "dc2")

// Create a peering at dialer by establishing a peering with acceptor's token
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
@@ -344,27 +346,23 @@ func TestLeader_PeeringSync_Lifecycle_UnexportWhileDown(t *testing.T) {
}

func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
t.Run("without-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, false)
})
t.Run("with-tls", func(t *testing.T) {
testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, true)
})
}

func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS bool) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}

ca := connect.TestCA(t, nil)
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
if enableTLS {
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")

@@ -399,11 +397,6 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
c.NodeName = "dialer"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"
if enableTLS {
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
}
})
testrpc.WaitForLeader(t, dialer.RPC, "dc2")

@@ -476,7 +469,7 @@ func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) {
t.Run("server-name-validation", func(t *testing.T) {
testLeader_PeeringSync_failsForTLSError(t, func(token *structs.PeeringToken) {
token.ServerName = "wrong.name"
}, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.consul, bob.server.dc1.consul, not wrong.name`)
}, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.peering.11111111-2222-3333-4444-555555555555.consul, not wrong.name`)
})
t.Run("bad-ca-roots", func(t *testing.T) {
wrongRoot, err := ioutil.ReadFile("../../test/client_certs/rootca.crt")

@@ -491,14 +484,20 @@ func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) {
func testLeader_PeeringSync_failsForTLSError(t *testing.T, tokenMutateFn func(token *structs.PeeringToken), expectErr string) {
require.NotNil(t, tokenMutateFn)

ca := connect.TestCA(t, nil)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "bob"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"

c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")

@@ -541,10 +540,6 @@ func testLeader_PeeringSync_failsForTLSError(t *testing.T, tokenMutateFn func(to
c.NodeName = "betty"
c.Datacenter = "dc2"
c.PrimaryDatacenter = "dc2"

c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
})
testrpc.WaitForLeader(t, s2.RPC, "dc2")

@@ -583,11 +578,11 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
t.Skip("too slow for testing.Short")
}

// TODO(peering): Configure with TLS
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s1.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCTLSPort = freeport.GetOne(t)
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -662,15 +657,21 @@ func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
}

// Reserve a gRPC port so we can restart the accepting server with the same port.
ports := freeport.GetN(t, 1)
acceptingServerPort := ports[0]
acceptingServerPort := freeport.GetOne(t)

ca := connect.TestCA(t, nil)
_, acceptingServer := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptingServer.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCPort = acceptingServerPort
c.PeeringEnabled = true
c.GRPCTLSPort = acceptingServerPort
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, acceptingServer.RPC, "dc1")

@@ -773,9 +774,17 @@ func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
c.NodeName = "acceptingServer.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCPort = acceptingServerPort
c.DataDir = acceptingServer.config.DataDir
c.NodeID = acceptingServer.config.NodeID
c.GRPCTLSPort = acceptingServerPort
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})

testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1")

@@ -870,11 +879,19 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
t.Skip("too slow for testing.Short")
}

ca := connect.TestCA(t, nil)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s1.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.PeeringEnabled = true
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")

@@ -1172,11 +1189,19 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
lastIdx = uint64(0)
)

// TODO(peering): Configure with TLS
ca := connect.TestCA(t, nil)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.NodeName = "s1.dc1"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, s1.RPC, "dc1")

@@ -1583,10 +1608,20 @@ func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
maxRetryBackoff = 1
t.Cleanup(func() { maxRetryBackoff = orig })

ca := connect.TestCA(t, nil)
_, acceptor := testServerWithConfig(t, func(c *Config) {
c.NodeName = "acceptor"
c.Datacenter = "dc1"
c.TLSConfig.Domain = "consul"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
@@ -9,12 +9,14 @@ import (

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/ipaddr"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/proto/pbpeering"
)

@@ -53,10 +55,34 @@ func (b *PeeringBackend) GetLeaderAddress() string {
return b.leaderAddr
}

// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
func (b *PeeringBackend) GetAgentCACertificates() ([]string, error) {
// TODO(peering): handle empty CA pems
return b.srv.tlsConfigurator.GRPCManualCAPems(), nil
// GetTLSMaterials returns the TLS materials for the dialer to dial the acceptor using TLS.
// It returns the server name to validate, and the CA certificate to validate with.
func (b *PeeringBackend) GetTLSMaterials(generatingToken bool) (string, []string, error) {
if generatingToken {
if !b.srv.config.ConnectEnabled {
return "", nil, fmt.Errorf("connect.enabled must be set to true in the server's configuration when generating peering tokens")
}
if b.srv.config.GRPCTLSPort <= 0 && !b.srv.tlsConfigurator.GRPCServerUseTLS() {
return "", nil, fmt.Errorf("TLS for gRPC must be enabled when generating peering tokens")
}
}

roots, err := b.srv.getCARoots(nil, b.srv.fsm.State())
if err != nil {
return "", nil, fmt.Errorf("failed to fetch roots: %w", err)
}
if len(roots.Roots) == 0 || roots.TrustDomain == "" {
return "", nil, fmt.Errorf("CA has not finished initializing")
}

serverName := connect.PeeringServerSAN(b.srv.config.Datacenter, roots.TrustDomain)

var caPems []string
for _, r := range roots.Roots {
caPems = append(caPems, lib.EnsureTrailingNewline(r.RootCert))
}

return serverName, caPems, nil
}

// GetServerAddresses looks up server or mesh gateway addresses from the state store.

@@ -117,12 +143,6 @@ func serverAddresses(state *state.Store) ([]string, error) {
return addrs, nil
}

// GetServerName returns the SNI to be returned in the peering token data which
// will be used by peers when establishing peering connections over TLS.
func (b *PeeringBackend) GetServerName() string {
return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
}

// EncodeToken encodes a peering token as a bas64-encoded representation of JSON (for now).
func (b *PeeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
jsonToken, err := json.Marshal(tok)
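
The server name placed in the peering token by GetTLSMaterials is the peering SAN built from the datacenter and the Connect CA trust domain via connect.PeeringServerSAN. A small illustrative sketch (the trust domain below is the well-known test cluster ID used by this PR's tests, not a production value):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
)

func main() {
	// PeeringServerSAN builds the name a dialing peer validates the acceptor's
	// certificate against, of the form "server.<datacenter>.peering.<trust domain>".
	san := connect.PeeringServerSAN("dc1", "11111111-2222-3333-4444-555555555555.consul")
	fmt.Println(san)
	// Per the test assertions in this PR:
	// server.dc1.peering.11111111-2222-3333-4444-555555555555.consul
}
```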
@@ -11,7 +11,10 @@ import (
"github.com/stretchr/testify/require"
gogrpc "google.golang.org/grpc"

"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/testrpc"
)

@@ -21,9 +24,18 @@ func TestPeeringBackend_RejectsPartition(t *testing.T) {
}

t.Parallel()

ca := connect.TestCA(t, nil)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})

testrpc.WaitForLeader(t, s1.RPC, "dc1")

@@ -55,9 +67,17 @@ func TestPeeringBackend_IgnoresDefaultPartition(t *testing.T) {
}

t.Parallel()
ca := connect.TestCA(t, nil)
_, s1 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.Bootstrap = true
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})

testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -7,17 +7,18 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/types"
"github.com/stretchr/testify/require"
gogrpc "google.golang.org/grpc"

"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
"github.com/stretchr/testify/require"
)

func TestPeeringBackend_ForwardToLeader(t *testing.T) {

@@ -25,17 +26,26 @@ func TestPeeringBackend_ForwardToLeader(t *testing.T) {
t.Skip("too slow for testing.Short")
}

_, conf1 := testServerConfig(t)
server1, err := newServer(t, conf1)
require.NoError(t, err)

_, conf2 := testServerConfig(t)
conf2.Bootstrap = false
server2, err := newServer(t, conf2)
require.NoError(t, err)
ca := connect.TestCA(t, nil)
_, server1 := testServerWithConfig(t, func(c *Config) {
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
_, server2 := testServerWithConfig(t, func(c *Config) {
c.Bootstrap = false
})

// Join a 2nd server (not the leader)
testrpc.WaitForLeader(t, server1.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, server1.RPC, "dc1", nil)

joinLAN(t, server2, server1)
testrpc.WaitForLeader(t, server2.RPC, "dc1")

@@ -166,17 +176,26 @@ func TestPeerStreamService_ForwardToLeader(t *testing.T) {
t.Skip("too slow for testing.Short")
}

_, conf1 := testServerConfig(t)
server1, err := newServer(t, conf1)
require.NoError(t, err)

_, conf2 := testServerConfig(t)
conf2.Bootstrap = false
server2, err := newServer(t, conf2)
require.NoError(t, err)
ca := connect.TestCA(t, nil)
_, server1 := testServerWithConfig(t, func(c *Config) {
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
_, server2 := testServerWithConfig(t, func(c *Config) {
c.Bootstrap = false
})

// server1 is leader, server2 follower
testrpc.WaitForLeader(t, server1.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, server1.RPC, "dc1", nil)

joinLAN(t, server2, server1)
testrpc.WaitForLeader(t, server2.RPC, "dc1")
@@ -21,12 +21,14 @@ import (
"github.com/hashicorp/consul-net-rpc/net/rpc"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
grpcexternal "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"

@@ -1463,10 +1465,20 @@ func TestPreparedQuery_Execute(t *testing.T) {

s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)

ca := connect.TestCA(t, nil)
dir3, s3 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc3"
c.PrimaryDatacenter = "dc3"
c.NodeName = "acceptingServer.dc3"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
})
defer os.RemoveAll(dir3)
defer s3.Shutdown()
@@ -232,7 +232,7 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
}

// Apply config to copied fields because many tests only set the old
//values.
// values.
config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
config.ACLResolverSettings.NodeName = config.NodeName
config.ACLResolverSettings.Datacenter = config.Datacenter

@@ -247,15 +247,32 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
})
t.Cleanup(func() { srv.Shutdown() })

if srv.config.GRPCPort > 0 {
for _, grpcPort := range []int{srv.config.GRPCPort, srv.config.GRPCTLSPort} {
if grpcPort == 0 {
continue
}

// Normally the gRPC server listener is created at the agent level and
// passed down into the Server creation.
externalGRPCAddr := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCPort)
ln, err := net.Listen("tcp", externalGRPCAddr)
ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort))
require.NoError(t, err)

// Wrap the listener with TLS
if deps.TLSConfigurator.GRPCServerUseTLS() {
if grpcPort == srv.config.GRPCTLSPort || deps.TLSConfigurator.GRPCServerUseTLS() {
// Set the internally managed server certificate. The cert manager is hooked to the Agent, so we need to bypass that here.
if srv.config.PeeringEnabled && srv.config.ConnectEnabled {
key, _ := srv.config.CAConfig.Config["PrivateKey"].(string)
cert, _ := srv.config.CAConfig.Config["RootCert"].(string)
if key != "" && cert != "" {
ca := &structs.CARoot{
SigningKey: key,
RootCert: cert,
}
require.NoError(t, deps.TLSConfigurator.UpdateAutoTLSCert(connect.TestServerLeaf(t, srv.config.Datacenter, ca)))
deps.TLSConfigurator.UpdateAutoTLSPeeringServerName(connect.PeeringServerSAN("dc1", connect.TestTrustDomain))
}
}

// Wrap the listener with TLS.
ln = tls.NewListener(ln, deps.TLSConfigurator.IncomingGRPCConfig())
}
@@ -43,7 +43,6 @@ import (

const (
testPeerID = "caf067a6-f112-4907-9101-d45857d2b149"
testActiveStreamSecretID = "e778c518-f0db-473a-9224-24b357da971d"
testPendingStreamSecretID = "522c0daf-2ef2-4dab-bc78-5e04e3daf552"
testEstablishmentSecretID = "f6569d37-1c5b-4415-aae5-26f4594f7f60"
)
@@ -22,25 +22,6 @@ import (
"github.com/hashicorp/consul/testrpc"
)

var validCA = `
-----BEGIN CERTIFICATE-----
MIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg
Q0EgNzAeFw0xODA1MjExNjMzMjhaFw0yODA1MTgxNjMzMjhaMBYxFDASBgNVBAMT
C0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAER0qlxjnRcMEr
iSGlH7G7dYU7lzBEmLUSMZkyBbClmyV8+e8WANemjn+PLnCr40If9cmpr7RnC9Qk
GTaLnLiF16OCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/
MGgGA1UdDgRhBF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1OTpjMjpmYTo0ZTo3
NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToyNDpiMDowNDpiMzpl
ODo5Nzo1Yjo3ZTBqBgNVHSMEYzBhgF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1
OTpjMjpmYTo0ZTo3NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToy
NDpiMDowNDpiMzplODo5Nzo1Yjo3ZTA/BgNVHREEODA2hjRzcGlmZmU6Ly8xMjRk
ZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIuY29uc3VsMD0GA1UdHgEB
/wQzMDGgLzAtgisxMjRkZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIu
Y29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIQDzkkI7R+0U12a+zq2EQhP/n2mHmta+
fs2hBxWIELGwTAIgLdO7RRw+z9nnxCIA6kNl//mIQb+PGItespiHZKAz74Q=
-----END CERTIFICATE-----
`

func TestHTTP_Peering_GenerateToken(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

@@ -50,6 +31,7 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) {
a := NewTestAgent(t, "")

testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)

t.Run("No Body", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/token", nil)

@@ -107,9 +89,9 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) {
var token structs.PeeringToken
require.NoError(t, json.Unmarshal(tokenJSON, &token))

require.Nil(t, token.CA)
require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.GRPCPort)}, token.ServerAddresses)
require.Equal(t, "server.dc1.consul", token.ServerName)
require.NotNil(t, token.CA)
require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.GRPCTLSPort)}, token.ServerAddresses)
require.Equal(t, "server.dc1.peering.11111111-2222-3333-4444-555555555555.consul", token.ServerName)

// The PeerID in the token is randomly generated so we don't assert on its value.
require.NotEmpty(t, token.PeerID)

@@ -140,9 +122,9 @@ func TestHTTP_Peering_GenerateToken(t *testing.T) {
var token structs.PeeringToken
require.NoError(t, json.Unmarshal(tokenJSON, &token))

require.Nil(t, token.CA)
require.NotNil(t, token.CA)
require.Equal(t, []string{externalAddress}, token.ServerAddresses)
require.Equal(t, "server.dc1.consul", token.ServerName)
require.Equal(t, "server.dc1.peering.11111111-2222-3333-4444-555555555555.consul", token.ServerName)

// The PeerID in the token is randomly generated so we don't assert on its value.
require.NotEmpty(t, token.PeerID)

@@ -159,6 +141,7 @@ func TestHTTP_Peering_GenerateToken_EdgeCases(t *testing.T) {

a := NewTestAgent(t, "")
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)

body := &pbpeering.GenerateTokenRequest{
PeerName: "peering-a",

@@ -219,10 +202,9 @@ func TestHTTP_Peering_Establish(t *testing.T) {
t.Skip("too slow for testing.Short")
}

t.Parallel()
a := NewTestAgent(t, "")

testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)

t.Run("No Body", func(t *testing.T) {
req, err := http.NewRequest("POST", "/v1/peering/establish", nil)

@@ -291,14 +273,17 @@ func TestHTTP_Peering_Establish(t *testing.T) {
})
require.NoError(t, err)

req, err = http.NewRequest("POST", "/v1/peering/establish", bytes.NewReader(b))
require.NoError(t, err)
resp = httptest.NewRecorder()
a2.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String())
retry.Run(t, func(r *retry.R) {
req, err = http.NewRequest("POST", "/v1/peering/establish", bytes.NewReader(b))
require.NoError(r, err)

// success response does not currently return a value so {} is correct
require.Equal(t, "{}", resp.Body.String())
resp = httptest.NewRecorder()
a2.srv.h.ServeHTTP(resp, req)
require.Equal(r, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String())

// success response does not currently return a value so {} is correct
require.Equal(r, "{}", resp.Body.String())
})
})
}

@@ -416,6 +401,7 @@ func TestHTTP_Peering_Delete(t *testing.T) {
a := NewTestAgent(t, "")

testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)

bodyBytes, err := json.Marshal(&pbpeering.GenerateTokenRequest{
PeerName: "foo",
@@ -114,17 +114,14 @@ type Backend interface {
// partition and namespace from the token.
ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error)

// GetAgentCACertificates returns the CA certificate to be returned in the peering token data
GetAgentCACertificates() ([]string, error)
// GetTLSMaterials returns the TLS materials for the dialer to dial the acceptor using TLS.
// It returns the server name to validate, and the CA certificate to validate with.
GetTLSMaterials(generatingToken bool) (string, []string, error)

// GetServerAddresses returns the addresses used for establishing a peering connection.
// These may be server addresses or mesh gateway addresses if peering through mesh gateways.
GetServerAddresses() ([]string, error)

// GetServerName returns the SNI to be returned in the peering token data which
// will be used by peers when establishing peering connections over TLS.
GetServerName() string

// EncodeToken packages a peering token into a slice of bytes.
EncodeToken(tok *structs.PeeringToken) ([]byte, error)

@@ -224,6 +221,11 @@ func (s *Server) GenerateToken(
return nil, err
}

serverName, caPEMs, err := s.Backend.GetTLSMaterials(true)
if err != nil {
return nil, err
}

var (
peering *pbpeering.Peering
secretID string

@@ -291,11 +293,6 @@ func (s *Server) GenerateToken(
break
}

ca, err := s.Backend.GetAgentCACertificates()
if err != nil {
return nil, err
}

// ServerExternalAddresses must be formatted as addr:port.
var serverAddrs []string
if len(req.ServerExternalAddresses) > 0 {

@@ -310,9 +307,9 @@ func (s *Server) GenerateToken(
tok := structs.PeeringToken{
// Store the UUID so that we can do a global search when handling inbound streams.
PeerID: peering.ID,
CA: ca,
CA: caPEMs,
ServerAddresses: serverAddrs,
ServerName: s.Backend.GetServerName(),
ServerName: serverName,
EstablishmentSecret: secretID,
}

@@ -487,8 +484,13 @@ func (s *Server) validatePeeringLocality(token *structs.PeeringToken, partition

// If the token has the same server name as this cluster, but we can't find the peering
// in our store, it indicates a naming conflict.
if s.Backend.GetServerName() == token.ServerName && peering == nil {
return fmt.Errorf("conflict - peering token's server name matches the current cluster's server name, %q, but there is no record in the database", s.Backend.GetServerName())
serverName, _, err := s.Backend.GetTLSMaterials(false)
if err != nil {
return fmt.Errorf("failed to fetch TLS materials: %w", err)
}

if serverName == token.ServerName && peering == nil {
return fmt.Errorf("conflict - peering token's server name matches the current cluster's server name, %q, but there is no record in the database", serverName)
}

if peering != nil && acl.EqualPartitions(peering.GetPartition(), partition) {
@@ -2,6 +2,7 @@ package peering_test

import (
"context"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"

@@ -19,6 +20,7 @@ import (
grpcstatus "google.golang.org/grpc/status"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"

@@ -62,6 +64,7 @@ func generateTooManyMetaKeys() map[string]string {

func TestPeeringService_GenerateToken(t *testing.T) {
dir := testutil.TempDir(t, "consul")

signer, _, _ := tlsutil.GeneratePrivateKey()
ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
cafile := path.Join(dir, "cacert.pem")

@@ -97,10 +100,14 @@ func TestPeeringService_GenerateToken(t *testing.T) {

token := &structs.PeeringToken{}
require.NoError(t, json.Unmarshal(tokenJSON, token))
require.Equal(t, "server.dc1.consul", token.ServerName)
require.Equal(t, "server.dc1.peering.11111111-2222-3333-4444-555555555555.consul", token.ServerName)
require.Len(t, token.ServerAddresses, 1)
require.Equal(t, s.PublicGRPCAddr, token.ServerAddresses[0])
require.Equal(t, []string{ca}, token.CA)

// The roots utilized should be the ConnectCA roots and not the ones manually configured.
_, roots, err := s.Server.FSM().State().CARoots(nil)
require.NoError(t, err)
require.Equal(t, []string{roots.Active().RootCert}, token.CA)

require.NotEmpty(t, token.EstablishmentSecret)
secret = token.EstablishmentSecret

@@ -165,6 +172,7 @@ func TestPeeringService_GenerateToken(t *testing.T) {

func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) {
dir := testutil.TempDir(t, "consul")

signer, _, _ := tlsutil.GeneratePrivateKey()
ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
cafile := path.Join(dir, "cacert.pem")

@@ -191,10 +199,14 @@ func TestPeeringService_GenerateTokenExternalAddress(t *testing.T) {

token := &structs.PeeringToken{}
require.NoError(t, json.Unmarshal(tokenJSON, token))
require.Equal(t, "server.dc1.consul", token.ServerName)
require.Equal(t, "server.dc1.peering.11111111-2222-3333-4444-555555555555.consul", token.ServerName)
require.Len(t, token.ServerAddresses, 1)
require.Equal(t, externalAddress, token.ServerAddresses[0])
require.Equal(t, []string{ca}, token.CA)

// The roots utilized should be the ConnectCA roots and not the ones manually configured.
_, roots, err := s.Server.FSM().State().CARoots(nil)
require.NoError(t, err)
require.Equal(t, []string{roots.Active().RootCert}, token.CA)
}

func TestPeeringService_GenerateToken_ACLEnforcement(t *testing.T) {

@@ -385,9 +397,13 @@ func TestPeeringService_Establish_serverNameConflict(t *testing.T) {
// Manufacture token to have the same server name but a PeerID not in the store.
id, err := uuid.GenerateUUID()
require.NoError(t, err, "could not generate uuid")

serverName, _, err := s.Server.GetPeeringBackend().GetTLSMaterials(true)
require.NoError(t, err)

peeringToken := structs.PeeringToken{
ServerAddresses: []string{"1.2.3.4:8502"},
ServerName: s.Server.GetPeeringBackend().GetServerName(),
ServerName: serverName,
EstablishmentSecret: "foo",
PeerID: id,
}

@@ -409,12 +425,15 @@ func TestPeeringService_Establish_serverNameConflict(t *testing.T) {

func TestPeeringService_Establish(t *testing.T) {
// TODO(peering): see note on newTestServer, refactor to not use this
s1 := newTestServer(t, nil)
s1 := newTestServer(t, func(conf *consul.Config) {
conf.NodeName = "s1"
})
client1 := pbpeering.NewPeeringServiceClient(s1.ClientConn(t))

s2 := newTestServer(t, func(conf *consul.Config) {
conf.NodeName = "s2"
conf.Datacenter = "dc2"
conf.GRPCPort = 5301
conf.PrimaryDatacenter = "dc2"
})
client2 := pbpeering.NewPeeringServiceClient(s2.ClientConn(t))

@@ -430,8 +449,10 @@ func TestPeeringService_Establish(t *testing.T) {

var peerID string
testutil.RunStep(t, "peering can be established from token", func(t *testing.T) {
_, err = client2.Establish(ctx, &pbpeering.EstablishRequest{PeerName: "my-peer-s1", PeeringToken: tokenResp.PeeringToken})
require.NoError(t, err)
retry.Run(t, func(r *retry.R) {
_, err = client2.Establish(ctx, &pbpeering.EstablishRequest{PeerName: "my-peer-s1", PeeringToken: tokenResp.PeeringToken})
require.NoError(r, err)
})

ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
@@ -1097,9 +1118,7 @@ func TestPeeringService_TrustBundleListByService(t *testing.T) {
}

func TestPeeringService_validatePeer(t *testing.T) {
s1 := newTestServer(t, func(c *consul.Config) {
c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1"
})
s1 := newTestServer(t, nil)
client1 := pbpeering.NewPeeringServiceClient(s1.ClientConn(t))

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

@@ -1113,8 +1132,8 @@ func TestPeeringService_validatePeer(t *testing.T) {
})

s2 := newTestServer(t, func(conf *consul.Config) {
conf.GRPCPort = 5301
conf.Datacenter = "dc2"
conf.PrimaryDatacenter = "dc2"
})
client2 := pbpeering.NewPeeringServiceClient(s2.ClientConn(t))

@@ -1364,7 +1383,18 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer {
conf.PrimaryDatacenter = "dc1"
conf.ConnectEnabled = true

conf.GRPCPort = ports[3]
ca := connect.TestCA(t, nil)
conf.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
"LeafCertTTL": "72h",
"IntermediateCertTTL": "288h",
},
}
conf.GRPCTLSPort = ports[3]

nodeID, err := uuid.GenerateUUID()
if err != nil {

@@ -1383,27 +1413,34 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer {
conf.ACLResolverSettings.Datacenter = conf.Datacenter
conf.ACLResolverSettings.EnterpriseMeta = *conf.AgentEnterpriseMeta()

externalGRPCServer := gogrpc.NewServer()

deps := newDefaultDeps(t, conf)
externalGRPCServer := external.NewServer(deps.Logger)

server, err := consul.NewServer(conf, deps, externalGRPCServer)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, server.Shutdown())
})

require.NoError(t, deps.TLSConfigurator.UpdateAutoTLSCert(connect.TestServerLeaf(t, conf.Datacenter, ca)))
deps.TLSConfigurator.UpdateAutoTLSPeeringServerName(connect.PeeringServerSAN(conf.Datacenter, connect.TestTrustDomain))

// Normally the gRPC server listener is created at the agent level and
// passed down into the Server creation.
grpcAddr := fmt.Sprintf("127.0.0.1:%d", conf.GRPCPort)
grpcAddr := fmt.Sprintf("127.0.0.1:%d", conf.GRPCTLSPort)

ln, err := net.Listen("tcp", grpcAddr)
require.NoError(t, err)

ln = tls.NewListener(ln, deps.TLSConfigurator.IncomingGRPCConfig())

go func() {
_ = externalGRPCServer.Serve(ln)
}()
t.Cleanup(externalGRPCServer.Stop)

testrpc.WaitForLeader(t, server.RPC, conf.Datacenter)
testrpc.WaitForActiveCARoot(t, server.RPC, conf.Datacenter, nil)

return testingServer{
Server: server,
@@ -38,9 +38,7 @@ func validatePeeringToken(tok *structs.PeeringToken) error {
}
}

// TODO(peering): validate name matches SNI?
// TODO(peering): validate name well formed?
if tok.ServerName == "" {
if len(tok.CA) > 0 && tok.ServerName == "" {
return errPeeringTokenEmptyServerName
}
@@ -190,7 +190,7 @@ func (a *TestAgent) Start(t *testing.T) error {
Name: name,
})

portsConfig := randomPortsSource(t, a.UseHTTPS, a.UseGRPCTLS)
portsConfig := randomPortsSource(t, a.UseHTTPS)

// Create NodeID outside the closure, so that it does not change
testHCLConfig := TestConfigHCL(NodeID())

@@ -412,7 +412,7 @@ func (a *TestAgent) consulConfig() *consul.Config {
// chance of port conflicts for concurrently executed test binaries.
// Instead of relying on one set of ports to be sufficient we retry
// starting the agent with different ports on port conflict.
func randomPortsSource(t *testing.T, useHTTPS bool, useGRPCTLS bool) string {
func randomPortsSource(t *testing.T, useHTTPS bool) string {
ports := freeport.GetN(t, 8)

var http, https int

@@ -424,15 +424,6 @@ func randomPortsSource(t *testing.T, useHTTPS bool, useGRPCTLS bool) string {
https = -1
}

var grpc, grpcTLS int
if useGRPCTLS {
grpc = -1
grpcTLS = ports[7]
} else {
grpc = ports[6]
grpcTLS = -1
}

return `
ports = {
dns = ` + strconv.Itoa(ports[0]) + `

@@ -441,8 +432,8 @@ func randomPortsSource(t *testing.T, useHTTPS bool, useGRPCTLS bool) string {
serf_lan = ` + strconv.Itoa(ports[3]) + `
serf_wan = ` + strconv.Itoa(ports[4]) + `
server = ` + strconv.Itoa(ports[5]) + `
grpc = ` + strconv.Itoa(grpc) + `
grpc_tls = ` + strconv.Itoa(grpcTLS) + `
grpc = ` + strconv.Itoa(ports[6]) + `
grpc_tls = ` + strconv.Itoa(ports[7]) + `
}
`
}
@@ -96,6 +96,10 @@ func makeClientWithConfig(
if server.Config.Bootstrap {
server.WaitForLeader(t)
}
connectEnabled := server.Config.Connect["enabled"]
if enabled, ok := connectEnabled.(bool); ok && server.Config.Server && enabled {
server.WaitForActiveCARoot(t)
}

conf.Address = server.HTTPAddr
@@ -44,7 +44,6 @@ func TestAPI_Peering_ACLDeny(t *testing.T) {
serverConfig.ACL.Tokens.InitialManagement = "root"
serverConfig.ACL.Enabled = true
serverConfig.ACL.DefaultPolicy = "deny"
serverConfig.Ports.GRPC = 5300
})
defer s1.Stop()

@@ -52,7 +51,6 @@ func TestAPI_Peering_ACLDeny(t *testing.T) {
serverConfig.ACL.Tokens.InitialManagement = "root"
serverConfig.ACL.Enabled = true
serverConfig.ACL.DefaultPolicy = "deny"
serverConfig.Ports.GRPC = 5301
serverConfig.Datacenter = "dc2"
})
defer s2.Stop()

@@ -265,7 +263,7 @@ func TestAPI_Peering_GenerateToken_ExternalAddresses(t *testing.T) {
func TestAPI_Peering_GenerateToken_Read_Establish_Delete(t *testing.T) {
t.Parallel()

c, s := makeClient(t) // this is "dc1"
c, s := makeClientWithConfig(t, nil, nil) // this is "dc1"
defer s.Stop()
s.WaitForSerfCheck(t)
@@ -11,6 +11,7 @@ import (

"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
)

@@ -84,10 +85,12 @@ func TestEstablishCommand(t *testing.T) {
fmt.Sprintf("-peering-token=%s", res.PeeringToken),
}

code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "Success")
retry.Run(t, func(r *retry.R) {
code := cmd.Run(args)
require.Equal(r, 0, code)
output := ui.OutputWriter.String()
require.Contains(r, output, "Success")
})
})

t.Run("establish connection with options", func(t *testing.T) {

@@ -107,12 +110,14 @@ func TestEstablishCommand(t *testing.T) {
"-meta=region=us-west-1",
}

code := cmd.Run(args)
require.Equal(t, 0, code)
output := ui.OutputWriter.String()
require.Contains(t, output, "Success")
retry.Run(t, func(r *retry.R) {
code := cmd.Run(args)
require.Equal(r, 0, code)
output := ui.OutputWriter.String()
require.Contains(r, output, "Success")
})

//Meta
// Meta
peering, _, err := dialingClient.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
require.NoError(t, err)
@@ -78,7 +78,7 @@ func TestGenerateCommand(t *testing.T) {
require.Equal(t, 0, code)
token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.peering.11111111-2222-3333-4444-555555555555.consul\"")
})

t.Run("generate token with options", func(t *testing.T) {

@@ -97,13 +97,13 @@ func TestGenerateCommand(t *testing.T) {
require.Equal(t, 0, code)
token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String())
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.peering.11111111-2222-3333-4444-555555555555.consul\"")

//ServerExternalAddresses
// ServerExternalAddresses
require.Contains(t, string(token), "1.2.3.4")
require.Contains(t, string(token), "5.6.7.8")

//Meta
// Meta
peering, _, err := client.Peerings().Read(context.Background(), "bar", &api.QueryOptions{})
require.NoError(t, err)

@@ -136,6 +136,6 @@ func TestGenerateCommand(t *testing.T) {

token, err := base64.StdEncoding.DecodeString(outputRes.PeeringToken)
require.NoError(t, err, "error decoding token")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"")
require.Contains(t, string(token), "\"ServerName\":\"server.dc1.peering.11111111-2222-3333-4444-555555555555.consul\"")
})
}
@@ -52,6 +52,7 @@ type TestPortConfig struct {
SerfWan int `json:"serf_wan,omitempty"`
Server int `json:"server,omitempty"`
GRPC int `json:"grpc,omitempty"`
GRPCTLS int `json:"grpc_tls,omitempty"`
ProxyMinPort int `json:"proxy_min_port,omitempty"`
ProxyMaxPort int `json:"proxy_max_port,omitempty"`
}

@@ -156,7 +157,7 @@ func defaultServerConfig(t TestingTB) *TestServerConfig {
panic(err)
}

ports := freeport.GetN(t, 7)
ports := freeport.GetN(t, 8)

logBuffer := NewLogBuffer(t)

@@ -180,6 +181,7 @@ func defaultServerConfig(t TestingTB) *TestServerConfig {
SerfWan: ports[4],
Server: ports[5],
GRPC: ports[6],
GRPCTLS: ports[7],
},
ReadyTimeout: 10 * time.Second,
StopTimeout: 10 * time.Second,

@@ -224,11 +226,12 @@ type TestServer struct {
cmd *exec.Cmd
Config *TestServerConfig

HTTPAddr string
HTTPSAddr string
LANAddr string
WANAddr string
GRPCAddr string
HTTPAddr string
HTTPSAddr string
LANAddr string
WANAddr string
GRPCAddr string
GRPCTLSAddr string

HTTPClient *http.Client

@@ -302,11 +305,12 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er
Config: cfg,
cmd: cmd,

HTTPAddr: httpAddr,
HTTPSAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.HTTPS),
LANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfLan),
WANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfWan),
GRPCAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.GRPC),
HTTPAddr: httpAddr,
HTTPSAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.HTTPS),
LANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfLan),
WANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfWan),
GRPCAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.GRPC),
GRPCTLSAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.GRPCTLS),

HTTPClient: client,
@@ -1,2 +1,2 @@
primary_datacenter = "primary"
log_level = "trace"
log_level = "trace"

@@ -0,0 +1,6 @@
ports {
grpc_tls = 8503
}
connect {
enabled = true
}
@@ -104,6 +104,13 @@ function init_workdir {
mv workdir/${CLUSTER}/consul/server.hcl workdir/${CLUSTER}/consul-server/server.hcl
fi

if test -f "workdir/${CLUSTER}/consul/peering_server.hcl" -a $REQUIRE_PEERS = "1"
then
mv workdir/${CLUSTER}/consul/peering_server.hcl workdir/${CLUSTER}/consul-server/peering_server.hcl
else
rm workdir/${CLUSTER}/consul/peering_server.hcl
fi

# copy the ca-certs for SDS so we can verify the right ones are served
mkdir -p workdir/test-sds-server/certs
cp test-sds-server/certs/ca-root.crt workdir/test-sds-server/certs/ca-root.crt

@@ -216,11 +223,6 @@ function start_consul {
docker_kill_rm consul-${DC}-server
docker_kill_rm consul-${DC}

server_grpc_port="-1"
if is_set $REQUIRE_PEERS; then
server_grpc_port="8502"
fi

docker run -d --name envoy_consul-${DC}-server_1 \
--net=envoy-tests \
$WORKDIR_SNIPPET \

@@ -231,7 +233,6 @@ function start_consul {
agent -dev -datacenter "${DC}" \
-config-dir "/workdir/${DC}/consul" \
-config-dir "/workdir/${DC}/consul-server" \
-grpc-port $server_grpc_port \
-client "0.0.0.0" \
-bind "0.0.0.0" >/dev/null