// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package consul

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	metrics "github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/hashicorp/go-hclog"
	"golang.org/x/time/rate"

	"github.com/hashicorp/consul/lib/retry"
	"github.com/hashicorp/consul/logging"
)
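
// ReplicationGauges is the set of gauge definitions for the replication
// metrics emitted by the leader.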
var ReplicationGauges = []prometheus.GaugeDefinition{
	{
		Name: []string{"leader", "replication", "acl-policies", "status"},
		Help: "Tracks the current health of ACL policy replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "acl-policies", "index"},
		Help: "Tracks the index of ACL policies in the primary that the secondary has successfully replicated",
	},
	{
		Name: []string{"leader", "replication", "acl-tokens", "status"},
		Help: "Tracks the current health of ACL token replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "acl-tokens", "index"},
		Help: "Tracks the index of ACL tokens in the primary that the secondary has successfully replicated",
	},
	{
		Name: []string{"leader", "replication", "acl-roles", "status"},
		Help: "Tracks the current health of ACL role replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "acl-roles", "index"},
		Help: "Tracks the index of ACL roles in the primary that the secondary has successfully replicated",
	},
	{
		Name: []string{"leader", "replication", "config-entries", "status"},
		Help: "Tracks the current health of config entry replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "config-entries", "index"},
		Help: "Tracks the index of config entries in the primary that the secondary has successfully replicated",
	},
	{
		Name: []string{"leader", "replication", "federation-state", "status"},
		Help: "Tracks the current health of federation state replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "federation-state", "index"},
		Help: "Tracks the index of federation states in the primary that the secondary has successfully replicated",
	},
	{
		Name: []string{"leader", "replication", "namespaces", "status"},
		Help: "Tracks the current health of namespace replication on the leader",
	},
	{
		Name: []string{"leader", "replication", "namespaces", "index"},
		Help: "Tracks the index of namespaces in the primary that the secondary has successfully replicated",
	},
}

const (
	// replicationDefaultMaxRetryWait is the maximum amount of time to wait
	// between failed blocking queries when backing off.
	replicationDefaultMaxRetryWait = 120 * time.Second
)
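
// ReplicatorDelegate is the interface used by a Replicator to perform a
// single round of replication.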
type ReplicatorDelegate interface {
	Replicate(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (index uint64, exit bool, err error)
	MetricName() string
}
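
// ReplicatorConfig holds the configuration used to construct a Replicator.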
type ReplicatorConfig struct {
	// Name to be used in various logging
	Name string
	// Delegate to perform each round of replication
	Delegate ReplicatorDelegate
	// The number of replication rounds per second that are allowed
	Rate int
	// The number of replication rounds that can be done in a burst
	Burst int
	// Minimum number of RPC failures to ignore before backing off
	MinFailures uint
	// Maximum wait time between failing RPCs
	MaxRetryWait time.Duration
	// Where to send our logs
	Logger hclog.Logger
	// Function to use for determining if an error should be suppressed
	SuppressErrorLog func(err error) bool
}
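
// Replicator invokes its delegate in a loop to replicate data from the
// primary datacenter, rate limiting successful rounds and backing off with
// jitter after failed ones.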
type Replicator struct {
	limiter          *rate.Limiter
	waiter           *retry.Waiter
	delegate         ReplicatorDelegate
	logger           hclog.Logger
	lastRemoteIndex  uint64
	suppressErrorLog func(err error) bool
}
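
// NewReplicator validates the given config, filling in a default logger and
// maximum retry wait where needed, and returns a new Replicator.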
func NewReplicator(config *ReplicatorConfig) (*Replicator, error) {
	if config == nil {
		return nil, fmt.Errorf("Cannot create the Replicator without a config")
	}
	if config.Delegate == nil {
		return nil, fmt.Errorf("Cannot create the Replicator without a Delegate set in the config")
	}
	if config.Logger == nil {
		logger := hclog.New(&hclog.LoggerOptions{})
		config.Logger = logger
	}
	limiter := rate.NewLimiter(rate.Limit(config.Rate), config.Burst)

	maxWait := config.MaxRetryWait
	if maxWait == 0 {
		maxWait = replicationDefaultMaxRetryWait
	}
	waiter := &retry.Waiter{
		MinFailures: config.MinFailures,
		MaxWait:     maxWait,
		Jitter:      retry.NewJitter(10),
	}
	return &Replicator{
		limiter:          limiter,
		waiter:           waiter,
		delegate:         config.Delegate,
		logger:           config.Logger.Named(logging.Replication).Named(config.Name),
		suppressErrorLog: config.SuppressErrorLog,
	}, nil
}
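
// Run performs replication rounds in a loop until the context is cancelled or
// the delegate signals that replication should exit.
//
// A hypothetical wiring (myReplicateFn, logger, and ctx are assumed to be
// provided by the caller) might look like:
//
//	r, err := NewReplicator(&ReplicatorConfig{
//		Name:     "my-data",
//		Delegate: &FunctionReplicator{ReplicateFn: myReplicateFn, Name: "my-data"},
//		Rate:     1,
//		Burst:    1,
//		Logger:   logger,
//	})
//	if err != nil {
//		return err
//	}
//	go r.Run(ctx)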
func (r *Replicator) Run(ctx context.Context) error {
	defer r.logger.Info("stopped replication")

	for {
		// This ensures we aren't doing too many successful replication rounds - mostly useful when
		// the data within the primary datacenter is changing rapidly, but we still want to limit
		// the amount of resources that replication into the secondary datacenter consumes.
		if err := r.limiter.Wait(ctx); err != nil {
			return nil
		}

		// Perform a single round of replication
		index, exit, err := r.delegate.Replicate(ctx, atomic.LoadUint64(&r.lastRemoteIndex), r.logger)
		if exit {
			return nil
		}
		if err != nil {
			metrics.SetGauge([]string{"leader", "replication", r.delegate.MetricName(), "status"},
				0,
			)
			// Reset the lastRemoteIndex when there is an RPC failure. This should cause a full
			// sync to be done during the next round of replication.
			atomic.StoreUint64(&r.lastRemoteIndex, 0)

			if r.suppressErrorLog == nil || !r.suppressErrorLog(err) {
				r.logger.Warn("replication error (will retry if still leader)", "error", err)
			}

			if err := r.waiter.Wait(ctx); err != nil {
				return nil
			}
			continue
		}

		metrics.SetGauge([]string{"leader", "replication", r.delegate.MetricName(), "status"},
			1,
		)
		metrics.SetGauge([]string{"leader", "replication", r.delegate.MetricName(), "index"},
			float32(index),
		)

		atomic.StoreUint64(&r.lastRemoteIndex, index)
		r.logger.Debug("replication completed through remote index", "index", index)
		r.waiter.Reset()
	}
}
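
// Index returns the last remote index that was successfully replicated.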
func (r *Replicator) Index() uint64 {
	return atomic.LoadUint64(&r.lastRemoteIndex)
}
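
// ReplicatorFunc is the signature of a function that performs a single round
// of replication.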
type ReplicatorFunc func(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (index uint64, exit bool, err error)
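
// FunctionReplicator is a ReplicatorDelegate that wraps a plain function and
// a metric name.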
type FunctionReplicator struct {
	ReplicateFn ReplicatorFunc
	Name        string
}
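
// MetricName returns the name used when emitting replication metrics.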
func (r *FunctionReplicator) MetricName() string {
	return r.Name
}
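
// Replicate invokes the wrapped ReplicateFn.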
func (r *FunctionReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
	return r.ReplicateFn(ctx, lastRemoteIndex, logger)
}
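
// IndexReplicatorDiff holds the updates and deletions required to bring the
// local state in sync with the remote state.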
type IndexReplicatorDiff struct {
	NumUpdates   int
	Updates      interface{}
	NumDeletions int
	Deletions    interface{}
}
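
// IndexReplicatorDelegate provides the fetch, diff, and apply operations used
// by an IndexReplicator to perform index-based replication.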
type IndexReplicatorDelegate interface {
	// SingularNoun is the singular form of the item being replicated.
	SingularNoun() string

	// PluralNoun is the plural form of the item being replicated.
	PluralNoun() string

	// Name to use when emitting metrics
	MetricName() string

	// FetchRemote retrieves items newer than the provided index from the
	// remote datacenter (for diffing purposes).
	FetchRemote(lastRemoteIndex uint64) (int, interface{}, uint64, error)

	// FetchLocal retrieves items from the current datacenter (for diffing
	// purposes).
	FetchLocal() (int, interface{}, error)
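
	// DiffRemoteAndLocalState compares the local and remote items and returns
	// the updates and deletions needed to bring the local state in sync with
	// the remote state as of lastRemoteIndex.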
	DiffRemoteAndLocalState(local interface{}, remote interface{}, lastRemoteIndex uint64) (*IndexReplicatorDiff, error)
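
	// PerformDeletions applies the given deletions to the local state.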
	PerformDeletions(ctx context.Context, deletions interface{}) (exit bool, err error)
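
	// PerformUpdates applies the given updates to the local state.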
	PerformUpdates(ctx context.Context, updates interface{}) (exit bool, err error)
}
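
// IndexReplicator is a ReplicatorDelegate that keeps local state in sync with
// remote state by diffing the two and applying the resulting updates and
// deletions.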
type IndexReplicator struct {
	Delegate IndexReplicatorDelegate
	Logger   hclog.Logger
}
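
// MetricName returns the delegate's metric name.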
func (r *IndexReplicator) MetricName() string {
	return r.Delegate.MetricName()
}
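
// Replicate performs a single round of index-based replication: it fetches
// the remote and local state, diffs them, and applies the necessary deletions
// and updates locally.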
func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64, _ hclog.Logger) (uint64, bool, error) {
	fetchStart := time.Now()
	lenRemote, remote, remoteIndex, err := r.Delegate.FetchRemote(lastRemoteIndex)
	metrics.MeasureSince([]string{"leader", "replication", r.Delegate.MetricName(), "fetch"}, fetchStart)

	if err != nil {
		return 0, false, fmt.Errorf("failed to retrieve %s: %w", r.Delegate.PluralNoun(), err)
	}

	r.Logger.Debug("finished fetching remote objects", "amount", lenRemote)

	// Need to check if we should be stopping. This will be common, as the fetching process is a
	// blocking RPC that could have been outstanding for a long time, and during that time
	// leadership could have been lost.
	select {
	case <-ctx.Done():
		return 0, true, nil
	default:
		// do nothing
	}

	// Measure everything after the remote query, which can block for long
	// periods of time. This metric is a good measure of how expensive the
	// replication process is.
	defer metrics.MeasureSince([]string{"leader", "replication", r.Delegate.MetricName(), "apply"}, time.Now())

	lenLocal, local, err := r.Delegate.FetchLocal()
	if err != nil {
		return 0, false, fmt.Errorf("failed to retrieve local %s: %v", r.Delegate.PluralNoun(), err)
	}

	// If the remote index ever goes backwards, it's a good indication that
	// the remote side was rebuilt and we should do a full sync since we
	// can't make any assumptions about what's going on.
	//
	// Resetting lastRemoteIndex to 0 will work because we never consider local
	// raft indices. Instead we compare the raft modify index in the response object
	// with the lastRemoteIndex (only when we already have a config entry of the same kind/name)
	// to determine if an update is needed. Resetting lastRemoteIndex to 0 then has the effect
	// of making us think all the local state is out of date and any matching entries should
	// still be updated.
	//
	// The lastRemoteIndex is not used when the entry exists either only in the local state or
	// only in the remote state. In those situations we need to either delete it or create it.
	if remoteIndex < lastRemoteIndex {
		r.Logger.Warn("replication remote index moved backwards, forcing a full sync",
			"from", lastRemoteIndex,
			"to", remoteIndex,
		)
		lastRemoteIndex = 0
	}

	r.Logger.Debug("diffing replication state",
		"local_amount", lenLocal,
		"remote_amount", lenRemote,
	)

	// Calculate the changes required to bring the state into sync and then
	// apply them.
	diff, err := r.Delegate.DiffRemoteAndLocalState(local, remote, lastRemoteIndex)
	if err != nil {
		return 0, false, fmt.Errorf("failed to diff %s local and remote states: %v", r.Delegate.SingularNoun(), err)
	}

	r.Logger.Debug("diffed replication state",
		"deletions", diff.NumDeletions,
		"updates", diff.NumUpdates,
	)

	if diff.NumDeletions > 0 {
		r.Logger.Debug("performing deletions",
			"deletions", diff.NumDeletions,
		)

		exit, err := r.Delegate.PerformDeletions(ctx, diff.Deletions)
		if exit {
			return 0, true, nil
		}
		if err != nil {
			return 0, false, fmt.Errorf("failed to apply local %s deletions: %v", r.Delegate.SingularNoun(), err)
		}
		r.Logger.Debug("finished deletions")
	}

	if diff.NumUpdates > 0 {
		r.Logger.Debug("performing updates",
			"updates", diff.NumUpdates,
		)

		exit, err := r.Delegate.PerformUpdates(ctx, diff.Updates)
		if exit {
			return 0, true, nil
		}
		if err != nil {
			return 0, false, fmt.Errorf("failed to apply local %s updates: %v", r.Delegate.SingularNoun(), err)
		}
		r.Logger.Debug("finished updates")
	}

	// Return the index we got back from the remote side, since we've synced
	// up with the remote state as of that index.
	return remoteIndex, false, nil
}