package consul

import (
    "container/ring"
    "context"
    "errors"
    "fmt"
    "math"
    "strings"
    "time"

    "github.com/armon/go-metrics"
    "github.com/armon/go-metrics/prometheus"
    "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/go-memdb"
    "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/go-uuid"
    "golang.org/x/time/rate"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/keepalive"
    grpcstatus "google.golang.org/grpc/status"

    "github.com/hashicorp/consul/acl"
    "github.com/hashicorp/consul/agent/consul/state"
    "github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/logging"
    "github.com/hashicorp/consul/proto/pbpeering"
    "github.com/hashicorp/consul/proto/pbpeerstream"
)

var leaderExportedServicesCountKeyDeprecated = []string{"consul", "peering", "exported_services"}
var leaderExportedServicesCountKey = []string{"peering", "exported_services"}
var leaderHealthyPeeringKeyDeprecated = []string{"consul", "peering", "healthy"}
var leaderHealthyPeeringKey = []string{"peering", "healthy"}

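// LeaderPeeringMetrics is the set of peering gauge definitions, including the
// deprecated "consul"-prefixed variants kept for backwards compatibility.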
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
    {
        Name: leaderExportedServicesCountKeyDeprecated,
        Help: fmt.Sprint("Deprecated - please use ", strings.Join(leaderExportedServicesCountKey, "_")),
    },
    {
        Name: leaderExportedServicesCountKey,
        Help: "A gauge that tracks how many services are exported for the peering. " +
            "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
            "We emit this metric every 9 seconds",
    },
    {
        Name: leaderHealthyPeeringKeyDeprecated,
        Help: fmt.Sprint("Deprecated - please use ", strings.Join(leaderHealthyPeeringKey, "_")),
    },
    {
        Name: leaderHealthyPeeringKey,
        Help: "A gauge that tracks whether a peering is healthy (1) or not (0). " +
            "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
            "We emit this metric every 9 seconds",
    },
}

var (
    // fastConnRetryTimeout is how long we wait between retrying connections following the "fast" path
    // which is triggered on specific connection errors.
    fastConnRetryTimeout = 8 * time.Millisecond
    // maxFastConnRetries is the maximum number of fast connection retries before we follow exponential backoff.
    maxFastConnRetries = uint(5)
    // maxFastRetryBackoff is the maximum amount of time we'll wait between retries following the fast path.
    maxFastRetryBackoff = 8192 * time.Millisecond
    // maxRetryBackoffPeering is the maximum number of seconds we'll wait between retries when attempting to re-establish a peering connection.
    maxRetryBackoffPeering = 64
)

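// startPeeringStreamSync starts the leader routine that dials peers and manages
// their replication streams, along with the routine that emits peering metrics.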
func (s *Server) startPeeringStreamSync(ctx context.Context) {
    s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync)
    s.leaderRoutineManager.Start(ctx, peeringStreamsMetricsRoutineName, s.runPeeringMetrics)
}

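// runPeeringMetrics emits peering gauges on the configured metrics reporting
// interval until ctx is cancelled, zeroing the gauges on exit so that
// non-leaders do not report stale values.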
func (s *Server) runPeeringMetrics(ctx context.Context) error {
    ticker := time.NewTicker(s.config.MetricsReportingInterval)
    defer ticker.Stop()

    logger := s.logger.Named(logging.PeeringMetrics)
    defaultMetrics := metrics.Default

    for {
        select {
        case <-ctx.Done():
            logger.Info("stopping peering metrics")

            // "Zero-out" the metric on exit so that when prometheus scrapes this
            // metric from a non-leader, it does not get a stale value.
            metrics.SetGauge(leaderExportedServicesCountKeyDeprecated, float32(0))
            metrics.SetGauge(leaderExportedServicesCountKey, float32(0))
            return nil
        case <-ticker.C:
            if err := s.emitPeeringMetricsOnce(defaultMetrics()); err != nil {
                s.logger.Error("error emitting peering stream metrics", "error", err)
            }
        }
    }
}

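// emitPeeringMetricsOnce emits the exported-services and health gauges for every
// peering in the state store, labeled with the peer name, peer ID, and, where
// set, the partition.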
func (s *Server) emitPeeringMetricsOnce(metricsImpl *metrics.Metrics) error {
    _, peers, err := s.fsm.State().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier))
    if err != nil {
        return err
    }

    for _, peer := range peers {
        part := peer.Partition
        labels := []metrics.Label{
            {Name: "peer_name", Value: peer.Name},
            {Name: "peer_id", Value: peer.ID},
        }
        if part != "" {
            labels = append(labels, metrics.Label{Name: "partition", Value: part})
        }

        status, found := s.peerStreamServer.StreamStatus(peer.ID)
        if found {
            // exported services count metric
            esc := status.GetExportedServicesCount()
            metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKeyDeprecated, float32(esc), labels)
            metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
        }

        // peering health metric
        healthy := 0
        switch {
        case status.NeverConnected:
        case s.peerStreamServer.Tracker.IsHealthy(status):
            healthy = 1
        }
        metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKeyDeprecated, float32(healthy), labels)
        metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthy), labels)
    }

    return nil
}

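// runPeeringSync runs the peering reconciliation loop with backoff until ctx is
// cancelled.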
func (s *Server) runPeeringSync(ctx context.Context) error {
    logger := s.logger.Named("peering-syncer")
    cancelFns := make(map[string]context.CancelFunc)

    retryLoopBackoff(ctx, func() error {
        if err := s.syncPeeringsAndBlock(ctx, logger, cancelFns); err != nil {
            return err
        }
        return nil

    }, func(err error) {
        s.logger.Error("error syncing peering streams from state store", "error", err)
    })

    return nil
}

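// stopPeeringStreamSync stops the stream-sync and peering-metrics leader routines.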
func (s *Server) stopPeeringStreamSync() {
    // will be a no-op when not started
    s.leaderRoutineManager.Stop(peeringStreamsRoutineName)
    s.leaderRoutineManager.Stop(peeringStreamsMetricsRoutineName)
}

// syncPeeringsAndBlock is a long-running goroutine that is responsible for watching
// changes to peerings in the state store and managing streams to those peers.
func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger, cancelFns map[string]context.CancelFunc) error {
    // We have to be careful not to introduce a data race here. We want to
    // compare the current known peerings in the state store with known
    // connected streams to know when we should TERMINATE stray peerings.
    //
    // If you read the current peerings from the state store, then read the
    // current established streams you could lose the data race and have the
    // sequence of events be:
    //
    //   1. list peerings [A,B,C]
    //   2. persist new peering [D]
    //   3. accept new stream for [D]
    //   4. list streams [A,B,C,D]
    //   5. terminate [D]
    //
    // Which is wrong. If we instead ensure that (4) happens before (1), given
    // that you can't get an established stream without first passing a "does
    // this peering exist in the state store?" inquiry then this happens:
    //
    //   1. list streams [A,B,C]
    //   2. list peerings [A,B,C]
    //   3. persist new peering [D]
    //   4. accept new stream for [D]
    //   5. terminate []
    //
    // Or even this is fine:
    //
    //   1. list streams [A,B,C]
    //   2. persist new peering [D]
    //   3. accept new stream for [D]
    //   4. list peerings [A,B,C,D]
    //   5. terminate []
    connectedStreams := s.peerStreamServer.ConnectedStreams()

    state := s.fsm.State()

    // Pull the state store contents and set up to block for changes.
    ws := memdb.NewWatchSet()
    ws.Add(state.AbandonCh())
    ws.Add(ctx.Done())

    _, peers, err := state.PeeringList(ws, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier))
    if err != nil {
        return err
    }

    // TODO(peering) Adjust this debug info.
    // Generate a UUID to trace different passes through this function.
    seq, err := uuid.GenerateUUID()
    if err != nil {
        s.logger.Debug("failed to generate sequence uuid while syncing peerings")
    }

    logger.Trace("syncing new list of peers", "num_peers", len(peers), "sequence_id", seq)

    // Stored tracks the unique set of peers that should be dialed.
    // It is used to reconcile the list of active streams.
    stored := make(map[string]struct{})

    var merr *multierror.Error

    // Create connections and streams to peers in the state store that do not have an active stream.
    for _, peer := range peers {
        logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq)

        if !peer.IsActive() {
            // The peering was marked for deletion by ourselves or our peer, no need to dial or track them.
            continue
        }

        // Track all active peerings, since the reconciliation loop below applies to the token generator as well.
        stored[peer.ID] = struct{}{}

        if !peer.ShouldDial() {
            // We do not need to dial peerings where we generated the peering token.
            continue
        }

        // We may have written this peering to the store to trigger xDS updates, but still in the process of establishing.
        // If there isn't a secret yet, we're still trying to reach the other server.
        logger.Trace("reading peering secret", "sequence_id", seq)
        secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
        if err != nil {
            return fmt.Errorf("failed to read secret for peering: %w", err)
        }
        if secret.GetStream().GetActiveSecretID() == "" {
            continue
        }

        status, found := s.peerStreamServer.StreamStatus(peer.ID)
        if found && status.Connected {
            // Nothing to do when we already have an active stream to the peer.
            // Updated data will only be used if the stream becomes disconnected
            // since there's no need to tear down an active stream.
            continue
        }
        logger.Trace("ensuring stream to peer", "peer_id", peer.ID, "sequence_id", seq)

        if cancel, ok := cancelFns[peer.ID]; ok {
            // If the peer is known but we're not connected, clean up the retry-er and start over.
            // There may be new data in the state store that would enable us to get out of an error state.
            logger.Trace("cancelling context to re-establish stream", "peer_id", peer.ID, "sequence_id", seq)
            cancel()
        }

        if err := s.establishStream(ctx, logger, peer, secret, cancelFns); err != nil {
            // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs.
            // Lockable status isn't available here though. Could report it via the peering.Service?
            logger.Error("error establishing peering stream", "peer_id", peer.ID, "error", err)
            merr = multierror.Append(merr, err)

            // Continue on errors to prevent one bad peering from blocking the establishment and cleanup of others.
            continue
        }
    }

    logger.Trace("checking connected streams", "streams", connectedStreams, "sequence_id", seq)

    // Clean up active streams of peerings that were deleted from the state store.
    for stream, doneCh := range connectedStreams {
        if _, ok := stored[stream]; ok {
            // Active stream is in the state store, nothing to do.
            continue
        }

        select {
        case <-doneCh:
            // channel is closed, do nothing to avoid a panic
        default:
            logger.Trace("tearing down stream for deleted peer", "peer_id", stream, "sequence_id", seq)
            close(doneCh)
        }
    }

    logger.Trace("blocking for changes", "sequence_id", seq)

    // Block for any changes to the state store.
    ws.WatchCtx(ctx)

    logger.Trace("unblocked", "sequence_id", seq)
    return merr.ErrorOrNil()
}

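// establishStream starts a stream to the given peer: it registers the stream with
// the tracker, dials an address supplied by watchAddresses, sends the initial open
// message carrying the stream secret, and hands the stream off to the peerstream
// server, retrying on errors according to peeringRetryTimeout.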
func (s *Server) establishStream(ctx context.Context,
    logger hclog.Logger,
    peer *pbpeering.Peering,
    secret *pbpeering.PeeringSecrets,
    cancelFns map[string]context.CancelFunc) error {
    logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)

    if peer.PeerID == "" {
        return fmt.Errorf("expected PeerID to be non empty; the wrong end of peering is being dialed")
    }

    tlsOption, err := peer.TLSDialOption()
    if err != nil {
        return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
    }

    if secret.GetStream().GetActiveSecretID() == "" {
        return errors.New("missing stream secret for peering stream authorization, peering must be re-established")
    }

    logger.Trace("establishing stream to peer")

    streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
    if err != nil {
        return fmt.Errorf("failed to register stream: %v", err)
    }

    streamCtx, cancel := context.WithCancel(ctx)
    cancelFns[peer.ID] = cancel

    // Start a goroutine to watch for updates to peer server addresses.
    // The latest valid server address can be received from nextServerAddr.
    nextServerAddr := make(chan string)
    go s.watchAddresses(streamCtx, peer.ID, nextServerAddr)

    // Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
    go retryLoopBackoffPeering(streamCtx, logger, func() error {
        // Try a new address on each iteration by advancing the ring buffer on errors.
        addr, stillOpen := <-nextServerAddr
        if !stillOpen {
            // If the channel was closed that means the context was canceled, so we return.
            return streamCtx.Err()
        }

        opts := []grpc.DialOption{
            tlsOption,
            // TODO(peering): Use a grpc.WithStatsHandler here.
            // This should wait until the grpc-external server is wired up with a stats handler in NET-50.
            // For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
            grpc.WithKeepaliveParams(keepalive.ClientParameters{
                Time:    30 * time.Second,
                Timeout: 10 * time.Second,
                // send keepalive pings even if there are no active streams
                PermitWithoutStream: true,
            }),
            grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(50 * 1024 * 1024)),
        }

        logger.Trace("dialing peer", "addr", addr)
        conn, err := grpc.DialContext(streamCtx, addr, opts...)
        if err != nil {
            return fmt.Errorf("failed to dial: %w", err)
        }
        defer conn.Close()

        client := pbpeerstream.NewPeerStreamServiceClient(conn)
        stream, err := client.StreamResources(streamCtx)
        if err != nil {
            return err
        }

        initialReq := &pbpeerstream.ReplicationMessage{
            Payload: &pbpeerstream.ReplicationMessage_Open_{
                Open: &pbpeerstream.ReplicationMessage_Open{
                    PeerID:         peer.PeerID,
                    StreamSecretID: secret.GetStream().GetActiveSecretID(),
                    Remote: &pbpeering.RemoteInfo{
                        Partition:  peer.Partition,
                        Datacenter: s.config.Datacenter,
                    },
                },
            },
        }
        if err := stream.Send(initialReq); err != nil {
            return fmt.Errorf("failed to send initial stream request: %w", err)
        }

        streamReq := peerstream.HandleStreamRequest{
            LocalID:   peer.ID,
            RemoteID:  peer.PeerID,
            PeerName:  peer.Name,
            Partition: peer.Partition,
            Stream:    stream,
        }
        err = s.peerStreamServer.HandleStream(streamReq)
        // A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
        if err == nil {
            stream.CloseSend()
            s.peerStreamServer.DrainStream(streamReq)
            cancel()
            logger.Info("closed outbound stream")
        }
        return err

    }, func(err error) {
        // TODO(peering): why are we using TrackSendError here? This could also be a receive error.
        streamStatus.TrackSendError(err.Error())

        switch {
        case isErrCode(err, codes.FailedPrecondition):
            logger.Debug("stream disconnected due to 'failed precondition' error; reconnecting",
                "error", err)

        case isErrCode(err, codes.ResourceExhausted):
            logger.Debug("stream disconnected due to 'resource exhausted' error; reconnecting",
                "error", err)

        case errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded):
            logger.Debug("stream context was canceled", "error", err)

        case err != nil:
            logger.Error("error managing peering stream", "error", err)
        }
    }, peeringRetryTimeout)

    return nil
}

// watchAddresses sends an up-to-date address to nextServerAddr.
// These could be either remote peer server addresses, or local mesh gateways.
// The function loads the addresses into a ring buffer and cycles through them until:
//   1. streamCtx is cancelled (peer is deleted or we're re-establishing the stream with new data)
//   2. the peer, Mesh config entry, or (optionally) mesh gateway address set is modified, and the watchset fires.
//
// In case (2) we re-fetch all the data sources and rebuild the ring buffer.
// In the event that PeerThroughMeshGateways is set in the Mesh entry, we front-load the ring buffer with
// local mesh gateway addresses, so we can try those first, with the option to fall back to remote server addresses.
func (s *Server) watchAddresses(ctx context.Context, peerID string, nextServerAddr chan<- string) {
    defer close(nextServerAddr)

    var ringbuf *ring.Ring
    var ws memdb.WatchSet

    fetchAddresses := func() error {
        // Re-instantiate ws since it can only be watched once.
        ws = memdb.NewWatchSet()

        newRing, _, err := s.peeringBackend.GetDialAddresses(s.logger, ws, peerID)
        if err != nil {
            return fmt.Errorf("failed to fetch updated addresses to dial peer: %w", err)
        }
        ringbuf = newRing

        return nil
    }

    // Initialize the first ring buffer.
    if err := fetchAddresses(); err != nil {
        s.logger.Warn("error fetching addresses", "peer_id", peerID, "error", err)
    }

    for {
        select {
        case nextServerAddr <- ringbuf.Value.(string):
            ringbuf = ringbuf.Next()

        case err := <-ws.WatchCh(ctx):
            if err != nil {
                // Context was cancelled.
                return
            }

            // Watch fired so we re-fetch the necessary addresses and replace the ring buffer.
            if err := fetchAddresses(); err != nil {
                s.logger.Warn("watch for new addresses fired but the address list to dial may not have been updated",
                    "peer_id", peerID,
                    "error", err)
            }
        }
    }
}

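// startPeeringDeferredDeletion starts the leader routine that cleans up data for
// peerings marked for deletion.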
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
    s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}

// runPeeringDeletions watches for peerings marked for deletion and then cleans up data for them.
func (s *Server) runPeeringDeletions(ctx context.Context) error {
    logger := s.loggers.Named(logging.Peering)

    // This limiter's purpose is to control the rate of raft applies caused by the deferred deletion
    // process. This includes deletion of the peerings themselves in addition to any peering data.
    raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
    for {
        select {
        case <-ctx.Done():
            return nil
        default:
        }

        ws := memdb.NewWatchSet()
        state := s.fsm.State()
        _, peerings, err := s.fsm.State().PeeringListDeleted(ws)
        if err != nil {
            logger.Warn("encountered an error while searching for deleted peerings", "error", err)
            continue
        }

        if len(peerings) == 0 {
            ws.Add(state.AbandonCh())

            // wait for a peering to be deleted or the routine to be cancelled
            if err := ws.WatchCtx(ctx); err != nil {
                return err
            }
            continue
        }

        for _, p := range peerings {
            s.removePeeringAndData(ctx, logger, raftLimiter, p)
        }
    }
}

// removePeeringAndData removes data imported for a peering and the peering itself.
func (s *Server) removePeeringAndData(ctx context.Context, logger hclog.Logger, limiter *rate.Limiter, peer *pbpeering.Peering) {
    logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
    entMeta := *structs.NodeEnterpriseMetaInPartition(peer.Partition)

    // First delete all imported data.
    // By deleting all imported nodes we also delete all services and checks registered on them.
    if err := s.deleteAllNodes(ctx, limiter, entMeta, peer.Name); err != nil {
        logger.Error("Failed to remove Nodes for peer", "error", err)
        return
    }
    if err := s.deleteTrustBundleFromPeer(ctx, limiter, entMeta, peer.Name); err != nil {
        logger.Error("Failed to remove trust bundle for peer", "error", err)
        return
    }

    if err := limiter.Wait(ctx); err != nil {
        return
    }

    if peer.State == pbpeering.PeeringState_TERMINATED {
        // For peerings terminated by our peer we only clean up the local data, we do not delete the peering itself.
        // This is to avoid a situation where the peering disappears without the local operator's knowledge.
        return
    }

    // Once all imported data is deleted, the peering itself is also deleted.
    req := &pbpeering.PeeringDeleteRequest{
        Name:      peer.Name,
        Partition: acl.PartitionOrDefault(peer.Partition),
    }
    _, err := s.raftApplyProtobuf(structs.PeeringDeleteType, req)
    if err != nil {
        logger.Error("failed to apply full peering deletion", "error", err)
        return
    }
}

// deleteAllNodes will delete all nodes in a partition or all nodes imported from a given peer name.
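// Deletions are applied in transaction batches of roughly 256KB so that a single
// raft apply does not grow unbounded.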
func (s *Server) deleteAllNodes(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
    // Same as ACL batch upsert size
    nodeBatchSizeBytes := 256 * 1024

    _, nodes, err := s.fsm.State().NodeDump(nil, &entMeta, peerName)
    if err != nil {
        return err
    }
    if len(nodes) == 0 {
        return nil
    }

    i := 0
    for {
        var ops structs.TxnOps
        for batchSize := 0; batchSize < nodeBatchSizeBytes && i < len(nodes); i++ {
            entry := nodes[i]

            op := structs.TxnOp{
                Node: &structs.TxnNodeOp{
                    Verb: api.NodeDelete,
                    Node: structs.Node{
                        Node:      entry.Node,
                        Partition: entry.Partition,
                        PeerName:  entry.PeerName,
                    },
                },
            }
            ops = append(ops, &op)

            // Add entries to the transaction until it reaches the max batch size
            batchSize += len(entry.Node) + len(entry.Partition) + len(entry.PeerName)
        }

        // Send each batch as a TXN Req to avoid sending one at a time
        req := structs.TxnRequest{
            Datacenter: s.config.Datacenter,
            Ops:        ops,
        }
        if len(req.Ops) > 0 {
            if err := limiter.Wait(ctx); err != nil {
                return err
            }

            _, err := s.raftApplyMsgpack(structs.TxnRequestType, &req)
            if err != nil {
                return err
            }
        } else {
            break
        }
    }

    return nil
}

// deleteTrustBundleFromPeer deletes the trust bundle imported from a peer, if present.
func (s *Server) deleteTrustBundleFromPeer(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
    _, bundle, err := s.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName, EnterpriseMeta: entMeta})
    if err != nil {
        return err
    }
    if bundle == nil {
        return nil
    }

    if err := limiter.Wait(ctx); err != nil {
        return err
    }

    req := &pbpeering.PeeringTrustBundleDeleteRequest{
        Name:      peerName,
        Partition: entMeta.PartitionOrDefault(),
    }
    _, err = s.raftApplyProtobuf(structs.PeeringTrustBundleDeleteType, req)
    return err
}

// retryLoopBackoffPeering re-runs loopFn with a backoff on error. errFn is run whenever
// loopFn returns an error. retryTimeFn is used to calculate the time between retries on error.
// It is passed the number of errors in a row that loopFn has returned and the latest error
// from loopFn.
//
// This function is modelled off of retryLoopBackoffHandleSuccess but is specific to peering
// because peering needs to use different retry times depending on which error is returned.
// This function doesn't use a rate limiter, unlike retryLoopBackoffHandleSuccess, because
// the rate limiter is only needed in the success case when loopFn returns nil and we want to
// loop again. In the peering case, we exit on a successful loop so we don't need the limiter.
func retryLoopBackoffPeering(ctx context.Context, logger hclog.Logger, loopFn func() error, errFn func(error),
    retryTimeFn func(failedAttempts uint, loopErr error) time.Duration) {
    var failedAttempts uint
    var err error
    for {
        if err = loopFn(); err != nil {
            errFn(err)

            if failedAttempts < math.MaxUint {
                failedAttempts++
            }

            retryTime := retryTimeFn(failedAttempts, err)
            logger.Trace("in connection retry backoff", "delay", retryTime)
            timer := time.NewTimer(retryTime)

            select {
            case <-ctx.Done():
                timer.Stop()
                return
            case <-timer.C:
            }
            continue
        }
        return
    }
}

// peeringRetryTimeout returns the time that should be waited between re-establishing a peering
// connection after an error. We follow the default backoff from retryLoopBackoff
// unless the error is a "failed precondition" error in which case we retry much more quickly.
// Retrying quickly is important in the case of a failed precondition error because we expect it to resolve
// quickly. For example in the case of connecting with a follower through a load balancer, we just need to retry
// until our request lands on a leader.
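// A "resource exhausted" error (e.g. a message that was too large) is retried at
// maxFastRetryBackoff, and all other errors follow an exponential backoff capped
// at maxRetryBackoffPeering seconds.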
func peeringRetryTimeout(failedAttempts uint, loopErr error) time.Duration {
    if loopErr != nil && isErrCode(loopErr, codes.FailedPrecondition) {
        // Wait a constant time for the first number of retries.
        if failedAttempts <= maxFastConnRetries {
            return fastConnRetryTimeout
        }
        // From here, follow an exponential backoff maxing out at maxFastRetryBackoff.
        // The below equation multiplies fastConnRetryTimeout by 2^n where n is the number of failed attempts
        // we're on, starting at 1 now that we're past our maxFastConnRetries.
        // For example if fastConnRetryTimeout == 8ms and maxFastConnRetries == 5, then at 6 failed retries
        // we'll do 8ms * 2^1 = 16ms, then 8ms * 2^2 = 32ms, etc.
        ms := fastConnRetryTimeout * (1 << (failedAttempts - maxFastConnRetries))
        if ms > maxFastRetryBackoff {
            return maxFastRetryBackoff
        }
        return ms
    }

    // If the message sent was too large we probably should not retry at all.
    if loopErr != nil && isErrCode(loopErr, codes.ResourceExhausted) {
        return maxFastRetryBackoff
    }

    // Else we go with the default backoff from retryLoopBackoff.
    if (1 << failedAttempts) < maxRetryBackoffPeering {
        return (1 << failedAttempts) * time.Second
    }
    return time.Duration(maxRetryBackoffPeering) * time.Second
}

// isErrCode returns true if err is a gRPC error with the given error code.
func isErrCode(err error, code codes.Code) bool {
    if err == nil {
        return false
    }
    // Handle wrapped errors, since status.FromError does a naive assertion.
    var statusErr interface {
        GRPCStatus() *grpcstatus.Status
    }
    if errors.As(err, &statusErr) {
        return statusErr.GRPCStatus().Code() == code
    }

    grpcErr, ok := grpcstatus.FromError(err)
    if !ok {
        return false
    }
    return grpcErr.Code() == code
}