peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
package consul
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"encoding/base64"
|
|
|
|
"encoding/json"
|
2022-07-29 20:04:32 +00:00
|
|
|
"errors"
|
2022-07-22 19:05:08 +00:00
|
|
|
"fmt"
|
2022-07-15 18:15:50 +00:00
|
|
|
"io/ioutil"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2022-07-22 19:05:08 +00:00
|
|
|
"github.com/armon/go-metrics"
|
2022-07-29 20:04:32 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"google.golang.org/grpc"
|
2022-07-29 20:04:32 +00:00
|
|
|
"google.golang.org/grpc/codes"
|
|
|
|
grpcstatus "google.golang.org/grpc/status"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
|
2022-06-21 18:04:08 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
"github.com/hashicorp/consul/agent/consul/state"
|
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2022-06-21 18:04:08 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
"github.com/hashicorp/consul/proto/pbpeering"
|
2022-07-15 18:58:33 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/freeport"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2022-07-15 17:20:43 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
|
2022-07-15 18:15:50 +00:00
|
|
|
t.Run("without-tls", func(t *testing.T) {
|
|
|
|
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, false)
|
|
|
|
})
|
|
|
|
t.Run("with-tls", func(t *testing.T) {
|
|
|
|
testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
// testLeader_PeeringSync_Lifecycle_ClientDeletion verifies that when the
// dialing side (s2) deletes an established peering, its stream is torn down
// and the accepting side (s1) marks its copy of the peering as TERMINATED.
func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// s1 is the accepting side: it generates the peering token.
	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "bob"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
		if enableTLS {
			// Certs are named after the node ("Bob") so hostname
			// verification lines up with c.NodeName above.
			c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
			c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
			c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
		}
	})
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a peering by generating a token
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	t.Cleanup(cancel)

	// Dial s1's multiplexed RPC port directly; newServerDialer routes the
	// connection to the gRPC handler. Insecure is fine here because this
	// client connection is only used to call GenerateToken.
	conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	peeringClient := pbpeering.NewPeeringServiceClient(conn)

	req := pbpeering.GenerateTokenRequest{
		PeerName: "my-peer-s2",
	}
	resp, err := peeringClient.GenerateToken(ctx, &req)
	require.NoError(t, err)

	// The token is transported as base64-encoded JSON; decode it so the
	// test can hand its fields (PeerID, CA, server addresses) to s2.
	tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
	require.NoError(t, err)

	var token structs.PeeringToken
	require.NoError(t, json.Unmarshal(tokenJSON, &token))

	// S1 should not have a stream tracked for dc2 because s1 generated a token for baz, and therefore needs to wait to be dialed.
	// NOTE(review): the fixed sleep is a deliberate "prove the absence"
	// window before asserting no stream exists; a retry loop cannot
	// express "never appears".
	time.Sleep(1 * time.Second)
	_, found := s1.peerStreamServer.StreamStatus(token.PeerID)
	require.False(t, found)

	var (
		// Arbitrary fixed UUID used as the local ID of the peering on s2.
		s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
	)

	// Bring up s2 and store s1's token so that it attempts to dial.
	_, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "betty"
		// dc2 is its own primary so it acts as an independent cluster.
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc2"
		if enableTLS {
			c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
			c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
			c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
		}
	})
	testrpc.WaitForLeader(t, s2.RPC, "dc2")

	// Simulate a peering initiation event by writing a peering with data from a peering token.
	// Eventually the leader in dc2 should dial and connect to the leader in dc1.
	p := &pbpeering.Peering{
		ID:                  s2PeerID,
		Name:                "my-peer-s1",
		PeerID:              token.PeerID,
		PeerCAPems:          token.CA,
		PeerServerName:      token.ServerName,
		PeerServerAddresses: token.ServerAddresses,
	}
	// Having server addresses from the token makes this the dialing side.
	require.True(t, p.ShouldDial())

	// We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store.
	require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))

	// Wait for s2's leader to establish the stream to s1.
	retry.Run(t, func(r *retry.R) {
		status, found := s2.peerStreamServer.StreamStatus(p.ID)
		require.True(r, found)
		require.True(r, status.Connected)
	})

	// Delete the peering to trigger the termination sequence.
	// A peering with a non-zero DeletedAt is treated as deleted; the write
	// uses a higher raft index (2000 > 1000) so it supersedes the original.
	deleted := &pbpeering.Peering{
		ID:        s2PeerID,
		Name:      "my-peer-s1",
		DeletedAt: structs.TimeToProto(time.Now()),
	}
	require.NoError(t, s2.fsm.State().PeeringWrite(2000, deleted))
	s2.logger.Trace("deleted peering for my-peer-s1")

	// The deletion should cause s2 to stop tracking the stream.
	retry.Run(t, func(r *retry.R) {
		_, found := s2.peerStreamServer.StreamStatus(p.ID)
		require.False(r, found)
	})

	// s1 should have also marked the peering as terminated.
	retry.Run(t, func(r *retry.R) {
		_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
			Value: "my-peer-s2",
		})
		require.NoError(r, err)
		require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
	})
}
|
|
|
|
|
|
|
|
func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
|
2022-07-15 18:15:50 +00:00
|
|
|
t.Run("without-tls", func(t *testing.T) {
|
|
|
|
testLeader_PeeringSync_Lifecycle_ServerDeletion(t, false)
|
|
|
|
})
|
|
|
|
t.Run("with-tls", func(t *testing.T) {
|
|
|
|
testLeader_PeeringSync_Lifecycle_ServerDeletion(t, true)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
func testLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T, enableTLS bool) {
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
_, s1 := testServerWithConfig(t, func(c *Config) {
|
2022-07-15 18:15:50 +00:00
|
|
|
c.NodeName = "bob"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.TLSConfig.Domain = "consul"
|
2022-07-15 18:15:50 +00:00
|
|
|
if enableTLS {
|
|
|
|
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
|
|
|
|
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
|
|
|
|
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
|
|
|
|
}
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
// Create a peering by generating a token
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
|
|
|
t.Cleanup(cancel)
|
|
|
|
|
|
|
|
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
|
|
|
|
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
|
|
|
|
grpc.WithInsecure(),
|
|
|
|
grpc.WithBlock())
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer conn.Close()
|
|
|
|
|
|
|
|
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
|
|
|
|
|
|
|
req := pbpeering.GenerateTokenRequest{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
}
|
|
|
|
resp, err := peeringClient.GenerateToken(ctx, &req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
var token structs.PeeringToken
|
|
|
|
require.NoError(t, json.Unmarshal(tokenJSON, &token))
|
|
|
|
|
2022-06-21 18:04:08 +00:00
|
|
|
var (
|
|
|
|
s1PeerID = token.PeerID
|
|
|
|
s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
|
|
|
|
)
|
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
// Bring up s2 and store s1's token so that it attempts to dial.
|
|
|
|
_, s2 := testServerWithConfig(t, func(c *Config) {
|
2022-07-15 18:15:50 +00:00
|
|
|
c.NodeName = "betty"
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
c.Datacenter = "dc2"
|
|
|
|
c.PrimaryDatacenter = "dc2"
|
2022-07-15 18:15:50 +00:00
|
|
|
if enableTLS {
|
|
|
|
c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
|
|
|
|
c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
|
|
|
|
c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
|
|
|
|
}
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
|
|
|
|
|
|
|
// Simulate a peering initiation event by writing a peering with data from a peering token.
|
|
|
|
// Eventually the leader in dc2 should dial and connect to the leader in dc1.
|
|
|
|
p := &pbpeering.Peering{
|
2022-06-21 18:04:08 +00:00
|
|
|
ID: s2PeerID,
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
Name: "my-peer-s1",
|
|
|
|
PeerID: token.PeerID,
|
|
|
|
PeerCAPems: token.CA,
|
|
|
|
PeerServerName: token.ServerName,
|
|
|
|
PeerServerAddresses: token.ServerAddresses,
|
|
|
|
}
|
|
|
|
require.True(t, p.ShouldDial())
|
|
|
|
|
|
|
|
// We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store.
|
|
|
|
require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2022-07-08 17:01:13 +00:00
|
|
|
status, found := s2.peerStreamServer.StreamStatus(p.ID)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
require.True(r, found)
|
|
|
|
require.True(r, status.Connected)
|
|
|
|
})
|
|
|
|
|
2022-06-13 14:22:46 +00:00
|
|
|
// Delete the peering from the server peer to trigger the termination sequence.
|
|
|
|
deleted := &pbpeering.Peering{
|
2022-06-21 18:04:08 +00:00
|
|
|
ID: s1PeerID,
|
2022-06-13 14:22:46 +00:00
|
|
|
Name: "my-peer-s2",
|
|
|
|
DeletedAt: structs.TimeToProto(time.Now()),
|
|
|
|
}
|
|
|
|
require.NoError(t, s1.fsm.State().PeeringWrite(2000, deleted))
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
s2.logger.Trace("deleted peering for my-peer-s1")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2022-07-08 17:01:13 +00:00
|
|
|
_, found := s1.peerStreamServer.StreamStatus(p.PeerID)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
require.False(r, found)
|
|
|
|
})
|
|
|
|
|
2022-06-13 14:22:46 +00:00
|
|
|
// s2 should have received the termination message and updated the peering state.
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
_, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{
|
|
|
|
Value: "my-peer-s1",
|
|
|
|
})
|
|
|
|
require.NoError(r, err)
|
2022-06-14 14:39:23 +00:00
|
|
|
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
})
|
|
|
|
}
|
2022-06-14 01:50:59 +00:00
|
|
|
|
2022-07-15 18:15:50 +00:00
|
|
|
func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) {
|
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("server-name-validation", func(t *testing.T) {
|
|
|
|
testLeader_PeeringSync_failsForTLSError(t, func(p *pbpeering.Peering) {
|
|
|
|
p.PeerServerName = "wrong.name"
|
|
|
|
}, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.consul, bob.server.dc1.consul, not wrong.name`)
|
|
|
|
})
|
|
|
|
t.Run("bad-ca-roots", func(t *testing.T) {
|
|
|
|
wrongRoot, err := ioutil.ReadFile("../../test/client_certs/rootca.crt")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
testLeader_PeeringSync_failsForTLSError(t, func(p *pbpeering.Peering) {
|
|
|
|
p.PeerCAPems = []string{string(wrongRoot)}
|
|
|
|
}, `transport: authentication handshake failed: x509: certificate signed by unknown authority`)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testLeader_PeeringSync_failsForTLSError stands up an accepting server (s1, dc1)
// with gRPC TLS enabled, generates a peering token from it, then stands up a
// dialing server (s2, dc2) whose peering is corrupted by peerMutateFn before
// being written. It asserts the resulting stream never connects and that the
// stream status records an error message containing expectErr.
func testLeader_PeeringSync_failsForTLSError(t *testing.T, peerMutateFn func(p *pbpeering.Peering), expectErr string) {
	require.NotNil(t, peerMutateFn)

	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "bob"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"

		// TLS material for the externally-reachable gRPC listener that peers dial.
		c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
		c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
		c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
	})
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a peering by generating a token
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	t.Cleanup(cancel)

	// Dial s1's multiplexed RPC port directly (insecure is fine here; TLS is
	// only exercised on the peering stream itself).
	conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	peeringClient := pbpeering.NewPeeringServiceClient(conn)

	req := pbpeering.GenerateTokenRequest{
		PeerName: "my-peer-s2",
	}
	resp, err := peeringClient.GenerateToken(ctx, &req)
	require.NoError(t, err)

	// The peering token is base64-encoded JSON; decode it to get the secret
	// PeerID, CA pems, server name, and server addresses.
	tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
	require.NoError(t, err)

	var token structs.PeeringToken
	require.NoError(t, json.Unmarshal(tokenJSON, &token))

	// S1 should not have a stream tracked for dc2 because s1 generated a token
	// for baz, and therefore needs to wait to be dialed.
	time.Sleep(1 * time.Second)
	_, found := s1.peerStreamServer.StreamStatus(token.PeerID)
	require.False(t, found)

	var (
		s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
	)

	// Bring up s2 and store s1's token so that it attempts to dial.
	_, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "betty"
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc2"

		c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
		c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
		c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
	})
	testrpc.WaitForLeader(t, s2.RPC, "dc2")

	// Simulate a peering initiation event by writing a peering with data from a peering token.
	// Eventually the leader in dc2 should dial and connect to the leader in dc1.
	p := &pbpeering.Peering{
		ID:                  s2PeerID,
		Name:                "my-peer-s1",
		PeerID:              token.PeerID,
		PeerCAPems:          token.CA,
		PeerServerName:      token.ServerName,
		PeerServerAddresses: token.ServerAddresses,
	}
	// Corrupt the peering (wrong server name, wrong roots, ...) so the dial fails.
	peerMutateFn(p)
	require.True(t, p.ShouldDial())

	// We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store.
	require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))

	// The leader should attempt to dial, fail the TLS handshake, and record the
	// error on the tracked (but never connected) stream.
	retry.Run(t, func(r *retry.R) {
		status, found := s2.peerStreamTracker.StreamStatus(p.ID)
		require.True(r, found)
		require.False(r, status.Connected)
		require.Contains(r, status.LastSendErrorMessage, expectErr)
	})
}
|
|
|
|
|
2022-06-14 01:50:59 +00:00
|
|
|
// TestLeader_Peering_DeferredDeletion verifies that marking a peering as
// deleted (by writing a non-zero DeletedAt) causes the leader routine to
// clean up all data imported from that peer (services, checks, nodes, trust
// bundle) and finally remove the peering record itself.
func TestLeader_Peering_DeferredDeletion(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// TODO(peering): Configure with TLS
	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "s1.dc1"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
	})
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	var (
		peerID      = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
		peerName    = "my-peer-s2"
		defaultMeta = acl.DefaultEnterpriseMeta()
		lastIdx     = uint64(0)
	)

	// Simulate a peering initiation event by writing a peering to the state store.
	lastIdx++
	require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
		ID:   peerID,
		Name: peerName,
	}))

	// Insert imported data: nodes, services, checks, trust bundle
	lastIdx = insertTestPeeringData(t, s1.fsm.State(), peerName, lastIdx)

	// Mark the peering for deletion to trigger the termination sequence.
	lastIdx++
	require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
		ID:        peerID,
		Name:      peerName,
		DeletedAt: structs.TimeToProto(time.Now()),
	}))

	// Ensure imported data is gone:
	retry.Run(t, func(r *retry.R) {
		// No catalog services remain for the peer.
		_, csn, err := s1.fsm.State().ServiceDump(nil, "", false, defaultMeta, peerName)
		require.NoError(r, err)
		require.Len(r, csn, 0)

		// No health checks remain for the peer.
		_, checks, err := s1.fsm.State().ChecksInState(nil, api.HealthAny, defaultMeta, peerName)
		require.NoError(r, err)
		require.Len(r, checks, 0)

		// No nodes remain for the peer.
		_, nodes, err := s1.fsm.State().NodeDump(nil, defaultMeta, peerName)
		require.NoError(r, err)
		require.Len(r, nodes, 0)

		// The peer's trust bundle has been removed.
		_, tb, err := s1.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName})
		require.NoError(r, err)
		require.Nil(r, tb)
	})

	// The leader routine should pick up the deletion and finish deleting the peering.
	retry.Run(t, func(r *retry.R) {
		_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
			Value: peerName,
		})
		require.NoError(r, err)
		require.Nil(r, peering)
	})
}
|
|
|
|
|
2022-07-15 18:58:33 +00:00
|
|
|
// Test that the dialing peer attempts to reestablish connections when the accepting peer
// shuts down without sending a Terminated message.
//
// To test this, we start the two peer servers (accepting and dialing), set up peering, and then shut down
// the accepting peer. This terminates the connection without sending a Terminated message.
// We then restart the accepting peer (we actually spin up a new server with the same config and port) and then
// assert that the dialing peer reestablishes the connection.
func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	// Reserve a gRPC port so we can restart the accepting server with the same port.
	ports := freeport.GetN(t, 1)
	acceptingServerPort := ports[0]

	_, acceptingServer := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "acceptingServer.dc1"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
		c.GRPCPort = acceptingServerPort
	})
	testrpc.WaitForLeader(t, acceptingServer.RPC, "dc1")

	// Create a peering by generating a token.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	t.Cleanup(cancel)

	conn, err := grpc.DialContext(ctx, acceptingServer.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(acceptingServer.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	peeringClient := pbpeering.NewPeeringServiceClient(conn)
	req := pbpeering.GenerateTokenRequest{
		PeerName: "my-peer-dialing-server",
	}
	resp, err := peeringClient.GenerateToken(ctx, &req)
	require.NoError(t, err)
	// Decode the base64-encoded JSON token to recover the peer IDs and dial info.
	tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
	require.NoError(t, err)
	var token structs.PeeringToken
	require.NoError(t, json.Unmarshal(tokenJSON, &token))

	var (
		dialingServerPeerID   = token.PeerID
		acceptingServerPeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
	)

	// Bring up dialingServer and store acceptingServer's token so that it attempts to dial.
	_, dialingServer := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "dialing-server.dc2"
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc2"
	})
	testrpc.WaitForLeader(t, dialingServer.RPC, "dc2")
	p := &pbpeering.Peering{
		ID:                  acceptingServerPeerID,
		Name:                "my-peer-accepting-server",
		PeerID:              token.PeerID,
		PeerCAPems:          token.CA,
		PeerServerName:      token.ServerName,
		PeerServerAddresses: token.ServerAddresses,
	}
	require.True(t, p.ShouldDial())
	require.NoError(t, dialingServer.fsm.State().PeeringWrite(1000, p))

	// Wait for the stream to be connected.
	retry.Run(t, func(r *retry.R) {
		status, found := dialingServer.peerStreamServer.StreamStatus(p.ID)
		require.True(r, found)
		require.True(r, status.Connected)
	})

	// Wait until the dialing server has sent its roots over. This avoids a race condition where the accepting server
	// shuts down, but the dialing server is still sending messages to the stream. When this happens, an error is raised
	// which causes the stream to restart.
	// In this test, we want to test what happens when the stream is closed when there are _no_ messages being sent.
	retry.Run(t, func(r *retry.R) {
		_, bundle, err := acceptingServer.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: "my-peer-dialing-server"})
		require.NoError(r, err)
		require.NotNil(r, bundle)
	})

	// Shutdown the accepting server.
	require.NoError(t, acceptingServer.Shutdown())
	// Have to manually shut down the gRPC server otherwise it stays bound to the port.
	acceptingServer.externalGRPCServer.Stop()

	// Mimic the server restarting by starting a new server with the same config.
	_, acceptingServerRestart := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "acceptingServer.dc1"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
		c.GRPCPort = acceptingServerPort
	})
	testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1")

	// Re-insert the peering state.
	require.NoError(t, acceptingServerRestart.fsm.State().PeeringWrite(2000, &pbpeering.Peering{
		ID:    dialingServerPeerID,
		Name:  "my-peer-dialing-server",
		State: pbpeering.PeeringState_PENDING,
	}))

	// The dialing peer should eventually reconnect.
	retry.Run(t, func(r *retry.R) {
		connStreams := acceptingServerRestart.peerStreamServer.ConnectedStreams()
		require.Contains(r, connStreams, dialingServerPeerID)
	})
}
|
|
|
|
|
2022-06-14 01:50:59 +00:00
|
|
|
func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastIdx uint64) uint64 {
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, store.PeeringTrustBundleWrite(lastIdx, &pbpeering.PeeringTrustBundle{
|
|
|
|
TrustDomain: "952e6bd1-f4d6-47f7-83ff-84b31babaa17",
|
|
|
|
PeerName: peer,
|
|
|
|
RootPEMs: []string{"certificate bundle"},
|
|
|
|
}))
|
|
|
|
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
Node: "aaa",
|
|
|
|
Address: "10.0.0.1",
|
|
|
|
PeerName: peer,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "a-service",
|
|
|
|
ID: "a-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
{
|
|
|
|
CheckID: "a-service-1-check",
|
|
|
|
ServiceName: "a-service",
|
|
|
|
ServiceID: "a-service-1",
|
|
|
|
Node: "aaa",
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}))
|
|
|
|
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
Node: "bbb",
|
|
|
|
Address: "10.0.0.2",
|
|
|
|
PeerName: peer,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "b-service",
|
|
|
|
ID: "b-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
{
|
|
|
|
CheckID: "b-service-1-check",
|
|
|
|
ServiceName: "b-service",
|
|
|
|
ServiceID: "b-service-1",
|
|
|
|
Node: "bbb",
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}))
|
|
|
|
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
Node: "ccc",
|
|
|
|
Address: "10.0.0.3",
|
|
|
|
PeerName: peer,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "c-service",
|
|
|
|
ID: "c-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
{
|
|
|
|
CheckID: "c-service-1-check",
|
|
|
|
ServiceName: "c-service",
|
|
|
|
ServiceID: "c-service-1",
|
|
|
|
Node: "ccc",
|
|
|
|
PeerName: peer,
|
|
|
|
},
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
}))
|
|
|
|
|
|
|
|
return lastIdx
|
|
|
|
}
|
|
|
|
|
2022-07-22 19:05:08 +00:00
|
|
|
// TODO(peering): once we move away from keeping state in stream tracker only on leaders, move this test to consul/server_test maybe
|
2022-07-18 17:20:04 +00:00
|
|
|
func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
|
2022-07-15 17:20:43 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(peering): Configure with TLS
|
|
|
|
_, s1 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = "s1.dc1"
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.TLSConfig.Domain = "consul"
|
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
// Create a peering by generating a token
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
|
|
|
t.Cleanup(cancel)
|
|
|
|
|
|
|
|
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
|
|
|
|
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
|
|
|
|
grpc.WithInsecure(),
|
|
|
|
grpc.WithBlock())
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer conn.Close()
|
|
|
|
|
|
|
|
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
|
|
|
|
|
|
|
req := pbpeering.GenerateTokenRequest{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
}
|
|
|
|
resp, err := peeringClient.GenerateToken(ctx, &req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
var token structs.PeeringToken
|
|
|
|
require.NoError(t, json.Unmarshal(tokenJSON, &token))
|
|
|
|
|
|
|
|
var (
|
|
|
|
s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
|
|
|
|
lastIdx = uint64(0)
|
|
|
|
)
|
|
|
|
|
|
|
|
// Bring up s2 and store s1's token so that it attempts to dial.
|
|
|
|
_, s2 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = "s2.dc2"
|
|
|
|
c.Datacenter = "dc2"
|
|
|
|
c.PrimaryDatacenter = "dc2"
|
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
|
|
|
|
|
|
|
// Simulate a peering initiation event by writing a peering with data from a peering token.
|
|
|
|
// Eventually the leader in dc2 should dial and connect to the leader in dc1.
|
|
|
|
p := &pbpeering.Peering{
|
|
|
|
ID: s2PeerID,
|
|
|
|
Name: "my-peer-s1",
|
|
|
|
PeerID: token.PeerID,
|
|
|
|
PeerCAPems: token.CA,
|
|
|
|
PeerServerName: token.ServerName,
|
|
|
|
PeerServerAddresses: token.ServerAddresses,
|
|
|
|
}
|
|
|
|
require.True(t, p.ShouldDial())
|
|
|
|
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p))
|
|
|
|
|
|
|
|
/// add services to S1 to be synced to S2
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
ID: types.NodeID(generateUUID()),
|
|
|
|
Node: "aaa",
|
|
|
|
Address: "10.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "a-service",
|
|
|
|
ID: "a-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
2022-06-14 01:50:59 +00:00
|
|
|
{
|
2022-07-15 17:20:43 +00:00
|
|
|
CheckID: "a-service-1-check",
|
|
|
|
ServiceName: "a-service",
|
|
|
|
ServiceID: "a-service-1",
|
|
|
|
Node: "aaa",
|
2022-06-14 01:50:59 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}))
|
|
|
|
|
2022-07-15 17:20:43 +00:00
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
ID: types.NodeID(generateUUID()),
|
|
|
|
|
|
|
|
Node: "bbb",
|
|
|
|
Address: "10.0.0.2",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "b-service",
|
|
|
|
ID: "b-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
{
|
|
|
|
CheckID: "b-service-1-check",
|
|
|
|
ServiceName: "b-service",
|
|
|
|
ServiceID: "b-service-1",
|
|
|
|
Node: "bbb",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}))
|
|
|
|
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.FSM().State().EnsureRegistration(lastIdx, &structs.RegisterRequest{
|
|
|
|
ID: types.NodeID(generateUUID()),
|
|
|
|
|
|
|
|
Node: "ccc",
|
|
|
|
Address: "10.0.0.3",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "c-service",
|
|
|
|
ID: "c-service-1",
|
|
|
|
Port: 8080,
|
|
|
|
},
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
{
|
|
|
|
CheckID: "c-service-1-check",
|
|
|
|
ServiceName: "c-service",
|
|
|
|
ServiceID: "c-service-1",
|
|
|
|
Node: "ccc",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}))
|
|
|
|
/// finished adding services
|
|
|
|
|
|
|
|
type testCase struct {
|
2022-07-19 18:43:29 +00:00
|
|
|
name string
|
|
|
|
description string
|
|
|
|
exportedService structs.ExportedServicesConfigEntry
|
|
|
|
expectedImportedServsCount uint64
|
|
|
|
expectedExportedServsCount uint64
|
2022-07-15 17:20:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
testCases := []testCase{
|
|
|
|
{
|
|
|
|
name: "wildcard",
|
|
|
|
description: "for a wildcard exported services, we want to see all services synced",
|
|
|
|
exportedService: structs.ExportedServicesConfigEntry{
|
|
|
|
Name: "default",
|
|
|
|
Services: []structs.ExportedService{
|
|
|
|
{
|
|
|
|
Name: structs.WildcardSpecifier,
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-07-19 18:43:29 +00:00
|
|
|
expectedImportedServsCount: 4, // 3 services from above + the "consul" service
|
|
|
|
expectedExportedServsCount: 4, // 3 services from above + the "consul" service
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "no sync",
|
|
|
|
description: "update the config entry to allow no service sync",
|
|
|
|
exportedService: structs.ExportedServicesConfigEntry{
|
|
|
|
Name: "default",
|
|
|
|
},
|
2022-07-19 18:43:29 +00:00
|
|
|
expectedImportedServsCount: 0, // we want to see this decremented from 4 --> 0
|
|
|
|
expectedExportedServsCount: 0, // we want to see this decremented from 4 --> 0
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "just a, b services",
|
|
|
|
description: "export just two services",
|
|
|
|
exportedService: structs.ExportedServicesConfigEntry{
|
|
|
|
Name: "default",
|
|
|
|
Services: []structs.ExportedService{
|
|
|
|
{
|
|
|
|
Name: "a-service",
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "b-service",
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-07-19 18:43:29 +00:00
|
|
|
expectedImportedServsCount: 2,
|
|
|
|
expectedExportedServsCount: 2,
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "unexport b service",
|
|
|
|
description: "by unexporting b we want to see the count decrement eventually",
|
|
|
|
exportedService: structs.ExportedServicesConfigEntry{
|
|
|
|
Name: "default",
|
|
|
|
Services: []structs.ExportedService{
|
|
|
|
{
|
|
|
|
Name: "a-service",
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-07-19 18:43:29 +00:00
|
|
|
expectedImportedServsCount: 1,
|
|
|
|
expectedExportedServsCount: 1,
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "export c service",
|
|
|
|
description: "now export the c service and expect the count to increment",
|
|
|
|
exportedService: structs.ExportedServicesConfigEntry{
|
|
|
|
Name: "default",
|
|
|
|
Services: []structs.ExportedService{
|
|
|
|
{
|
|
|
|
Name: "a-service",
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "c-service",
|
|
|
|
Consumers: []structs.ServiceConsumer{
|
|
|
|
{
|
|
|
|
PeerName: "my-peer-s2",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-07-19 18:43:29 +00:00
|
|
|
expectedImportedServsCount: 2,
|
|
|
|
expectedExportedServsCount: 2,
|
2022-07-15 17:20:43 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
conn2, err := grpc.DialContext(ctx, s2.config.RPCAddr.String(),
|
|
|
|
grpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())),
|
|
|
|
grpc.WithInsecure(),
|
|
|
|
grpc.WithBlock())
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer conn2.Close()
|
|
|
|
|
|
|
|
peeringClient2 := pbpeering.NewPeeringServiceClient(conn2)
|
|
|
|
|
|
|
|
for _, tc := range testCases {
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.fsm.State().EnsureConfigEntry(lastIdx, &tc.exportedService))
|
|
|
|
|
2022-07-18 17:20:04 +00:00
|
|
|
// Check that imported services count on S2 are what we expect
|
2022-07-15 17:20:43 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2022-07-18 17:20:04 +00:00
|
|
|
// on Read
|
|
|
|
resp, err := peeringClient2.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s1"})
|
2022-07-15 17:20:43 +00:00
|
|
|
require.NoError(r, err)
|
2022-07-18 17:20:04 +00:00
|
|
|
require.NotNil(r, resp.Peering)
|
2022-07-19 18:43:29 +00:00
|
|
|
require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ImportedServiceCount)
|
2022-07-18 17:20:04 +00:00
|
|
|
|
|
|
|
// on List
|
|
|
|
resp2, err2 := peeringClient2.PeeringList(ctx, &pbpeering.PeeringListRequest{})
|
|
|
|
require.NoError(r, err2)
|
|
|
|
require.NotEmpty(r, resp2.Peerings)
|
2022-07-19 18:43:29 +00:00
|
|
|
require.Equal(r, tc.expectedExportedServsCount, resp2.Peerings[0].ImportedServiceCount)
|
2022-07-18 17:20:04 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Check that exported services count on S1 are what we expect
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
// on Read
|
|
|
|
resp, err := peeringClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-s2"})
|
|
|
|
require.NoError(r, err)
|
|
|
|
require.NotNil(r, resp.Peering)
|
2022-07-19 18:43:29 +00:00
|
|
|
require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ExportedServiceCount)
|
2022-07-18 17:20:04 +00:00
|
|
|
|
|
|
|
// on List
|
|
|
|
resp2, err2 := peeringClient.PeeringList(ctx, &pbpeering.PeeringListRequest{})
|
|
|
|
require.NoError(r, err2)
|
2022-07-15 17:20:43 +00:00
|
|
|
require.NotEmpty(r, resp2.Peerings)
|
2022-07-19 18:43:29 +00:00
|
|
|
require.Equal(r, tc.expectedExportedServsCount, resp2.Peerings[0].ExportedServiceCount)
|
2022-07-15 17:20:43 +00:00
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
2022-06-14 01:50:59 +00:00
|
|
|
}
|
2022-07-22 19:05:08 +00:00
|
|
|
|
|
|
|
// TODO(peering): once we move away from keeping state in stream tracker only on leaders, move this test to consul/server_test maybe
//
// TestLeader_PeeringMetrics_emitPeeringMetrics verifies that the per-peer
// "exported services" gauge is emitted with the expected peer_name/peer_id
// labels and values after services are tracked on a peering stream.
func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	var (
		s2PeerID1          = generateUUID() // ID of s2's peering with s1
		s2PeerID2          = generateUUID() // ID of a second, synthetic peering ("my-peer-s3")
		testContextTimeout = 60 * time.Second
		lastIdx            = uint64(0) // simulated raft index for direct state-store writes
	)

	// TODO(peering): Configure with TLS
	_, s1 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "s1.dc1"
		c.Datacenter = "dc1"
		c.TLSConfig.Domain = "consul"
	})
	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a peering by generating a token
	ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout)
	t.Cleanup(cancel)

	// Dial s1's gRPC endpoint over the multiplexed RPC port.
	conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
		grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
		grpc.WithInsecure(),
		grpc.WithBlock())
	require.NoError(t, err)
	defer conn.Close()

	peeringClient := pbpeering.NewPeeringServiceClient(conn)

	req := pbpeering.GenerateTokenRequest{
		PeerName: "my-peer-s2",
	}
	resp, err := peeringClient.GenerateToken(ctx, &req)
	require.NoError(t, err)

	// The token is base64-encoded JSON; decode it so its fields can seed
	// the peering record written on s2 below.
	tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
	require.NoError(t, err)

	var token structs.PeeringToken
	require.NoError(t, json.Unmarshal(tokenJSON, &token))

	// Bring up s2 and store s1's token so that it attempts to dial.
	_, s2 := testServerWithConfig(t, func(c *Config) {
		c.NodeName = "s2.dc2"
		c.Datacenter = "dc2"
		c.PrimaryDatacenter = "dc2"
	})
	testrpc.WaitForLeader(t, s2.RPC, "dc2")

	// Simulate exporting services in the tracker
	{
		// Simulate a peering initiation event by writing a peering with data from a peering token.
		// Eventually the leader in dc2 should dial and connect to the leader in dc1.
		p := &pbpeering.Peering{
			ID:                  s2PeerID1,
			Name:                "my-peer-s1",
			PeerID:              token.PeerID,
			PeerCAPems:          token.CA,
			PeerServerName:      token.ServerName,
			PeerServerAddresses: token.ServerAddresses,
		}
		// Sanity check: a peering with server addresses is the dialing side.
		require.True(t, p.ShouldDial())
		lastIdx++
		require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p))

		p2 := &pbpeering.Peering{
			ID:                  s2PeerID2,
			Name:                "my-peer-s3",
			PeerID:              token.PeerID, // doesn't much matter what these values are
			PeerCAPems:          token.CA,
			PeerServerName:      token.ServerName,
			PeerServerAddresses: token.ServerAddresses,
		}
		require.True(t, p2.ShouldDial())
		lastIdx++
		require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, p2))

		// connect the stream
		mst1, err := s2.peeringServer.Tracker.Connected(s2PeerID1)
		require.NoError(t, err)

		// mimic tracking exported services
		mst1.TrackExportedService(structs.ServiceName{Name: "a-service"})
		mst1.TrackExportedService(structs.ServiceName{Name: "b-service"})
		mst1.TrackExportedService(structs.ServiceName{Name: "c-service"})

		// connect the stream
		mst2, err := s2.peeringServer.Tracker.Connected(s2PeerID2)
		require.NoError(t, err)

		// mimic tracking exported services
		mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
		mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
	}

	// set up a metrics sink
	sink := metrics.NewInmemSink(testContextTimeout, testContextTimeout)
	cfg := metrics.DefaultConfig("us-west")
	cfg.EnableHostname = false
	met, err := metrics.New(cfg, sink)
	require.NoError(t, err)

	// Trigger one metrics emission pass directly rather than waiting for the
	// leader's periodic loop.
	errM := s2.emitPeeringMetricsOnce(s2.logger, met)
	require.NoError(t, errM)

	retry.Run(t, func(r *retry.R) {
		intervals := sink.Data()
		require.Len(r, intervals, 1)
		intv := intervals[0]

		// the keys for a Gauge value look like: {serviceName}.{prefix}.{key_name};{label=value};...
		keyMetric1 := fmt.Sprintf("us-west.consul.peering.exported_services;peer_name=my-peer-s1;peer_id=%s", s2PeerID1)
		metric1, ok := intv.Gauges[keyMetric1]
		require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric1))

		require.Equal(r, float32(3), metric1.Value) // for a, b, c services

		keyMetric2 := fmt.Sprintf("us-west.consul.peering.exported_services;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
		metric2, ok := intv.Gauges[keyMetric2]
		require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))

		require.Equal(r, float32(2), metric2.Value) // for d, e services
	})
}
|
2022-07-22 22:20:21 +00:00
|
|
|
|
|
|
|
// Test that the leader doesn't start its peering deletion routing when
|
|
|
|
// peering is disabled.
|
|
|
|
func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
|
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
_, s1 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = "s1.dc1"
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.TLSConfig.Domain = "consul"
|
|
|
|
c.PeeringEnabled = false
|
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
var (
|
|
|
|
peerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
|
|
|
|
peerName = "my-peer-s2"
|
|
|
|
lastIdx = uint64(0)
|
|
|
|
)
|
|
|
|
|
|
|
|
// Simulate a peering initiation event by writing a peering to the state store.
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
|
|
|
|
ID: peerID,
|
|
|
|
Name: peerName,
|
|
|
|
}))
|
|
|
|
|
|
|
|
// Mark the peering for deletion to trigger the termination sequence.
|
|
|
|
lastIdx++
|
|
|
|
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
|
|
|
|
ID: peerID,
|
|
|
|
Name: peerName,
|
|
|
|
DeletedAt: structs.TimeToProto(time.Now()),
|
|
|
|
}))
|
|
|
|
|
|
|
|
// The leader routine shouldn't be running so the peering should never get deleted.
|
|
|
|
require.Never(t, func() bool {
|
|
|
|
_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
|
|
|
|
Value: peerName,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Logf("unexpected err: %s", err)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if peering == nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}, 7*time.Second, 1*time.Second, "peering should not have been deleted")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that the leader doesn't start its peering establishment routine
|
|
|
|
// when peering is disabled.
|
|
|
|
func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
|
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
|
|
|
_, s1 := testServerWithConfig(t, func(c *Config) {
|
|
|
|
c.NodeName = "s1.dc1"
|
|
|
|
c.Datacenter = "dc1"
|
|
|
|
c.TLSConfig.Domain = "consul"
|
|
|
|
c.PeeringEnabled = false
|
|
|
|
})
|
|
|
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
|
|
|
|
|
|
|
var (
|
|
|
|
peerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
|
|
|
|
peerName = "my-peer-s2"
|
|
|
|
lastIdx = uint64(0)
|
|
|
|
)
|
|
|
|
|
|
|
|
// Simulate a peering initiation event by writing a peering to the state store.
|
|
|
|
require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
|
|
|
|
ID: peerID,
|
|
|
|
Name: peerName,
|
|
|
|
PeerServerAddresses: []string{"1.2.3.4"},
|
|
|
|
}))
|
|
|
|
|
|
|
|
require.Never(t, func() bool {
|
|
|
|
_, found := s1.peerStreamTracker.StreamStatus(peerID)
|
|
|
|
return found
|
|
|
|
}, 7*time.Second, 1*time.Second, "peering should not have been established")
|
|
|
|
}
|
2022-07-29 20:04:32 +00:00
|
|
|
|
|
|
|
// Test peeringRetryTimeout when the errors are FailedPrecondition errors because these
|
|
|
|
// errors have a different backoff.
|
|
|
|
func TestLeader_Peering_peeringRetryTimeout_failedPreconditionErrors(t *testing.T) {
|
|
|
|
cases := []struct {
|
|
|
|
failedAttempts uint
|
|
|
|
expDuration time.Duration
|
|
|
|
}{
|
|
|
|
// Constant time backoff.
|
|
|
|
{0, 8 * time.Millisecond},
|
|
|
|
{1, 8 * time.Millisecond},
|
|
|
|
{2, 8 * time.Millisecond},
|
|
|
|
{3, 8 * time.Millisecond},
|
|
|
|
{4, 8 * time.Millisecond},
|
|
|
|
{5, 8 * time.Millisecond},
|
|
|
|
// Then exponential.
|
|
|
|
{6, 16 * time.Millisecond},
|
|
|
|
{7, 32 * time.Millisecond},
|
|
|
|
{13, 2048 * time.Millisecond},
|
|
|
|
{14, 4096 * time.Millisecond},
|
|
|
|
{15, 8192 * time.Millisecond},
|
|
|
|
// Max.
|
|
|
|
{16, 8192 * time.Millisecond},
|
|
|
|
{17, 8192 * time.Millisecond},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(fmt.Sprintf("failed attempts %d", c.failedAttempts), func(t *testing.T) {
|
|
|
|
err := grpcstatus.Error(codes.FailedPrecondition, "msg")
|
|
|
|
require.Equal(t, c.expDuration, peeringRetryTimeout(c.failedAttempts, err))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test peeringRetryTimeout with non-FailedPrecondition errors because these errors have a different
|
|
|
|
// backoff from FailedPrecondition errors.
|
|
|
|
func TestLeader_Peering_peeringRetryTimeout_regularErrors(t *testing.T) {
|
|
|
|
cases := []struct {
|
|
|
|
failedAttempts uint
|
|
|
|
expDuration time.Duration
|
|
|
|
}{
|
|
|
|
// Exponential.
|
|
|
|
{0, 1 * time.Second},
|
|
|
|
{1, 2 * time.Second},
|
|
|
|
{2, 4 * time.Second},
|
|
|
|
{3, 8 * time.Second},
|
|
|
|
// Until max.
|
|
|
|
{8, 256 * time.Second},
|
|
|
|
{9, 256 * time.Second},
|
|
|
|
{10, 256 * time.Second},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(fmt.Sprintf("failed attempts %d", c.failedAttempts), func(t *testing.T) {
|
|
|
|
err := errors.New("error")
|
|
|
|
require.Equal(t, c.expDuration, peeringRetryTimeout(c.failedAttempts, err))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test exercises all the functionality of retryLoopBackoffPeering.
|
|
|
|
func TestLeader_Peering_retryLoopBackoffPeering(t *testing.T) {
|
|
|
|
ctx := context.Background()
|
|
|
|
logger := hclog.NewNullLogger()
|
|
|
|
|
|
|
|
// loopCount counts how many times we executed loopFn.
|
|
|
|
loopCount := 0
|
|
|
|
// loopTimes holds the times at which each loopFn was executed. We use this to test the timeout functionality.
|
|
|
|
var loopTimes []time.Time
|
|
|
|
// loopFn will run 5 times and do something different on each loop.
|
|
|
|
loopFn := func() error {
|
|
|
|
loopCount++
|
|
|
|
loopTimes = append(loopTimes, time.Now())
|
|
|
|
if loopCount == 1 {
|
|
|
|
return fmt.Errorf("error 1")
|
|
|
|
}
|
|
|
|
if loopCount == 2 {
|
|
|
|
return fmt.Errorf("error 2")
|
|
|
|
}
|
|
|
|
if loopCount == 3 {
|
|
|
|
// On the 3rd loop, return success which ends the loop.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// allErrors collects all the errors passed into errFn.
|
|
|
|
var allErrors []error
|
|
|
|
errFn := func(e error) {
|
|
|
|
allErrors = append(allErrors, e)
|
|
|
|
}
|
|
|
|
retryTimeFn := func(_ uint, _ error) time.Duration {
|
|
|
|
return 1 * time.Millisecond
|
|
|
|
}
|
|
|
|
|
|
|
|
retryLoopBackoffPeering(ctx, logger, loopFn, errFn, retryTimeFn)
|
|
|
|
|
|
|
|
// Ensure loopFn ran the number of expected times.
|
|
|
|
require.Equal(t, 3, loopCount)
|
|
|
|
// Ensure errFn ran as expected.
|
|
|
|
require.Equal(t, []error{
|
|
|
|
fmt.Errorf("error 1"),
|
|
|
|
fmt.Errorf("error 2"),
|
|
|
|
}, allErrors)
|
|
|
|
|
|
|
|
// Test retryTimeFn by comparing the difference between when each loopFn ran.
|
|
|
|
for i := range loopTimes {
|
|
|
|
if i == 0 {
|
|
|
|
// Can't compare first time.
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
require.True(t, loopTimes[i].Sub(loopTimes[i-1]) >= 1*time.Millisecond,
|
|
|
|
"time between indices %d and %d was > 1ms", i, i-1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that if the context is cancelled the loop exits.
func TestLeader_Peering_retryLoopBackoffPeering_cancelContext(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	logger := hclog.NewNullLogger()

	// loopCount counts how many times we executed loopFn.
	loopCount := 0
	// loopFn always returns an error, so without cancellation the loop would
	// retry indefinitely.
	loopFn := func() error {
		loopCount++
		return fmt.Errorf("error %d", loopCount)
	}
	// allErrors collects all the errors passed into errFn.
	var allErrors []error
	errFn := func(e error) {
		allErrors = append(allErrors, e)
	}
	// Use a short retry delay; its value is irrelevant here because the loop
	// should exit on the already-cancelled context rather than retry.
	// NOTE(review): a previous comment said "huge number", contradicting the 1ms value.
	retryTimeFn := func(_ uint, _ error) time.Duration {
		return 1 * time.Millisecond
	}

	// Cancel the context before the loop runs. It should run once and then exit.
	cancel()
	retryLoopBackoffPeering(ctx, logger, loopFn, errFn, retryTimeFn)

	// Ensure loopFn ran the number of expected times.
	require.Equal(t, 1, loopCount)
	// Ensure errFn ran as expected.
	require.Equal(t, []error{
		fmt.Errorf("error 1"),
	}, allErrors)
}
|