Read-replica instead of non-voter (#10875)

Vishal Nayak authored on 2021-02-10 09:58:18 -05:00; committed by GitHub
parent c5fd996a36
commit a2394e7353
11 changed files with 63 additions and 20 deletions


@@ -24,7 +24,7 @@ type RaftJoinRequest struct {
LeaderClientCert string `json:"leader_client_cert"`
LeaderClientKey string `json:"leader_client_key"`
Retry bool `json:"retry"`
- NonVoter bool `json:"non_voter"`
+ ReadReplica bool `json:"read_replica"`
}
// RaftJoin adds the node from which this call is invoked from to the raft

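With the request struct renamed as above, a client can ask to join as a read replica through the Go API package. The following is a minimal sketch, assuming the package's Sys().RaftJoin helper (referenced by the comment above); the addresses are placeholders and TLS fields are omitted for brevity.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Point the client at the node that should join as a read replica
	// (placeholder address).
	cfg := api.DefaultConfig()
	cfg.Address = "https://vault-3.example.com:8200"
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// ReadReplica replaces the former NonVoter field on RaftJoinRequest.
	resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{
		LeaderAPIAddr: "https://vault-1.example.com:8200", // placeholder leader address
		Retry:         true,
		ReadReplica:   true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("joined:", resp.Joined)
}
```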

@@ -14,13 +14,16 @@ var _ cli.CommandAutocomplete = (*OperatorRaftJoinCommand)(nil)
type OperatorRaftJoinCommand struct {
flagRetry bool
- flagNonVoter bool
flagLeaderCACert string
flagLeaderClientCert string
flagLeaderClientKey string
flagAutoJoinScheme string
flagAutoJoinPort uint
+ flagReadReplica bool
*BaseCommand
+ // Deprecated flags
+ flagNonVoter bool
}
func (c *OperatorRaftJoinCommand) Synopsis() string {
@@ -113,6 +116,13 @@ func (c *OperatorRaftJoinCommand) Flags() *FlagSets {
Name: "non-voter",
Target: &c.flagNonVoter,
Default: false,
+ Usage: "DEPRECATED: Use -read-replica instead.",
})
+ f.BoolVar(&BoolVar{
+ Name: "read-replica",
+ Target: &c.flagReadReplica,
+ Default: false,
+ Usage: "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers is needed.",
+ })
@@ -148,6 +158,15 @@ func (c *OperatorRaftJoinCommand) Run(args []string) int {
return 1
}
+ switch {
+ case c.flagReadReplica:
+ // Prioritize -read-replica flag.
+ c.flagNonVoter = true
+ case c.flagNonVoter:
+ // If the deprecated -non-voter is used, update the -read-replica flag value.
+ c.flagReadReplica = true
+ }
leaderCACert, err := parseFlagFile(c.flagLeaderCACert)
if err != nil {
c.UI.Error(fmt.Sprintf("Failed to parse leader CA certificate: %s", err))
@@ -182,7 +201,7 @@ func (c *OperatorRaftJoinCommand) Run(args []string) int {
LeaderClientCert: leaderClientCert,
LeaderClientKey: leaderClientKey,
Retry: c.flagRetry,
- NonVoter: c.flagNonVoter,
+ ReadReplica: c.flagReadReplica,
}
if strings.Contains(leaderInfo, "provider=") {

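The switch above means that whichever of the two flags an operator sets, both flag fields end up true before the join request is built, so the node joins without voting rights either way. A rough standalone illustration of that precedence (the helper name resolveReadReplica is hypothetical and not part of the command, which mutates its own flag fields):

```go
package main

import "fmt"

// resolveReadReplica mirrors the precedence applied in Run(): -read-replica
// wins, and the deprecated -non-voter still enables read-replica behavior.
func resolveReadReplica(flagReadReplica, flagNonVoter bool) bool {
	switch {
	case flagReadReplica:
		return true
	case flagNonVoter:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(resolveReadReplica(true, false))  // true: -read-replica set
	fmt.Println(resolveReadReplica(false, true))  // true: deprecated -non-voter set
	fmt.Println(resolveReadReplica(false, false)) // false: regular voting member
}
```

Either flag ultimately resolves to the single ReadReplica field of the join request shown earlier.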

@@ -51,8 +51,8 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ
return
}
- if req.NonVoter && !nonVotersAllowed {
- respondError(w, http.StatusBadRequest, errors.New("non-voting nodes not allowed"))
+ if req.ReadReplica && !readReplicasAllowed {
+ respondError(w, http.StatusBadRequest, errors.New("read-replica nodes not allowed"))
return
}
@@ -83,7 +83,7 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ
},
}
- joined, err := core.JoinRaftCluster(context.Background(), leaderInfos, req.NonVoter)
+ joined, err := core.JoinRaftCluster(context.Background(), leaderInfos, req.ReadReplica)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
@@ -109,5 +109,5 @@ type JoinRequest struct {
LeaderClientKey string `json:"leader_client_key"`
LeaderTLSServerName string `json:"leader_tls_servername"`
Retry bool `json:"retry"`
- NonVoter bool `json:"non_voter"`
+ ReadReplica bool `json:"read_replica"`
}

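At the HTTP layer, the renamed field changes the JSON body accepted by the raft join endpoint (POST /v1/sys/storage/raft/join). A small sketch of the payload shape, using a local mirror of a subset of the JoinRequest fields above; the leader address is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// joinRequest mirrors a subset of the JoinRequest struct above; the wire name
// for the replica option is now "read_replica" instead of "non_voter".
type joinRequest struct {
	LeaderAPIAddr string `json:"leader_api_addr"`
	Retry         bool   `json:"retry"`
	ReadReplica   bool   `json:"read_replica"`
}

func main() {
	body, _ := json.MarshalIndent(joinRequest{
		LeaderAPIAddr: "https://vault-1.example.com:8200", // placeholder leader address
		Retry:         true,
		ReadReplica:   true,
	}, "", "  ")
	// Body for POST /v1/sys/storage/raft/join on the joining node.
	fmt.Println(string(body))
}
```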

@@ -27,7 +27,7 @@
additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {}
- nonVotersAllowed = false
+ readReplicasAllowed = false
)
func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler {


@@ -713,7 +713,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
// Non-voting servers are only allowed in enterprise. If Suffrage is disabled,
// error out to indicate that it isn't allowed.
for idx := range recoveryConfig.Servers {
- if !nonVotersAllowed && recoveryConfig.Servers[idx].Suffrage == raft.Nonvoter {
+ if !readReplicasAllowed && recoveryConfig.Servers[idx].Suffrage == raft.Nonvoter {
return fmt.Errorf("raft recovery failed to parse configuration for node %q: setting `non_voter` is only supported in enterprise", recoveryConfig.Servers[idx].ID)
}
}


@@ -7,9 +7,10 @@ import (
"errors"
)
- const nonVotersAllowed = false
+ const readReplicasAllowed = false
- // AddPeer adds a new server to the raft cluster
- func (b *RaftBackend) AddNonVotingPeer(ctx context.Context, peerID, clusterAddr string) error {
+ // AddReadReplicaPeer adds a new server to the raft cluster which does not have
+ // voting rights but gets all the data replicated to it.
+ func (b *RaftBackend) AddReadReplicaPeer(ctx context.Context, peerID, clusterAddr string) error {
return errors.New("not implemented")
}


@@ -163,7 +163,7 @@ type raftInformation struct {
challenge *wrapping.EncryptedBlobInfo
leaderClient *api.Client
leaderBarrierConfig *SealConfig
- nonVoter bool
+ readReplica bool
joinInProgress bool
}


@@ -34,6 +34,10 @@ func (b *SystemBackend) raftStoragePaths() []*framework.Path {
Type: framework.TypeString,
},
"non_voter": {
Type: framework.TypeBool,
+ Deprecated: true,
},
+ "read_replica": {
+ Type: framework.TypeBool,
+ },
},
@@ -256,7 +260,13 @@ func (b *SystemBackend) handleRaftBootstrapAnswerWrite() framework.OperationFunc
return logical.ErrorResponse("no cluster_addr provided"), logical.ErrInvalidRequest
}
- nonVoter := d.Get("non_voter").(bool)
+ // Prioritize read_replica parameter
+ readReplica := d.Get("read_replica").(bool)
+ // If the deprecated non_voter is used, consider that as well
+ if !readReplica && d.Get("non_voter").(bool) {
+ readReplica = true
+ }
answer, err := base64.StdEncoding.DecodeString(answerRaw)
if err != nil {
@@ -286,9 +296,9 @@ func (b *SystemBackend) handleRaftBootstrapAnswerWrite() framework.OperationFunc
return nil, errors.New("could not decode raft TLS configuration")
}
- switch nonVoter {
+ switch readReplica {
case true:
- err = raftBackend.AddNonVotingPeer(ctx, serverID, clusterAddr)
+ err = raftBackend.AddReadReplicaPeer(ctx, serverID, clusterAddr)
default:
err = raftBackend.AddPeer(ctx, serverID, clusterAddr)
}


@@ -710,7 +710,7 @@ func (c *Core) InitiateRetryJoin(ctx context.Context) error {
return nil
}
- func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJoinInfo, nonVoter bool) (bool, error) {
+ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJoinInfo, readReplica bool) (bool, error) {
raftBackend := c.getRaftBackend()
if raftBackend == nil {
return false, errors.New("raft backend not in use")
@@ -881,7 +881,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo
challenge: eBlob,
leaderClient: apiClient,
leaderBarrierConfig: &sealConfig,
- nonVoter: nonVoter,
+ readReplica: readReplica,
}
// If we're using Shamir and using raft for both physical and HA, we
@@ -1077,7 +1077,7 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess *seal.Access,
"answer": base64.StdEncoding.EncodeToString(plaintext),
"cluster_addr": clusterAddr,
"server_id": raftBackend.NodeID(),
- "non_voter": raftInfo.nonVoter,
+ "read_replica": raftInfo.readReplica,
}); err != nil {
return err
}

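Internally, callers of Core.JoinRaftCluster now pass the renamed readReplica argument. A compile-only sketch under stated assumptions: it takes an already-constructed *vault.Core, uses a placeholder leader address, and omits TLS material.

```go
package example

import (
	"context"

	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/vault"
)

// joinAsReadReplica asks the given core to join an existing raft cluster as a
// read replica: no voting rights, but the full data replication stream.
func joinAsReadReplica(c *vault.Core) (bool, error) {
	leaderInfos := []*raft.LeaderJoinInfo{
		{
			LeaderAPIAddr: "https://vault-1.example.com:8200", // placeholder
			Retry:         true,
		},
	}
	// The final argument was renamed from nonVoter to readReplica in this change.
	return c.JoinRaftCluster(context.Background(), leaderInfos, true)
}
```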

@@ -24,7 +24,7 @@ type RaftJoinRequest struct {
LeaderClientCert string `json:"leader_client_cert"`
LeaderClientKey string `json:"leader_client_key"`
Retry bool `json:"retry"`
- NonVoter bool `json:"non_voter"`
+ ReadReplica bool `json:"read_replica"`
}
// RaftJoin adds the node from which this call is invoked from to the raft


@@ -39,6 +39,19 @@ leader node.
- `leader_client_key` `(string: "")` - Client key used to communicate with
Raft's leader node.
+ - `auto_join` `(string: "")` - Defines any cloud auto-join metadata. If
+ supplied, Vault will attempt to automatically discover peers in addition to what
+ can be provided via 'leader_api_addr'.
+ - `auto_join_scheme` `(string: "https")` - URI scheme to be used for `auto_join`.
+ - `auto_join_port` `(int: 8200)` - Port to be used for `auto_join`.
+ - `read_replica` `(bool: false) (enterprise)` - If set, the server will not
+ participate in the Raft quorum and will only receive the data replication
+ stream. This can be used to add read scalability to a cluster in cases where
+ a high volume of reads to servers is needed. The default is false.
### Sample Payload
```json