2018-05-20 23:01:24 +00:00
|
|
|
package testhelpers
|
|
|
|
|
|
|
|
import (
|
2019-04-04 17:02:44 +00:00
|
|
|
"context"
|
2018-05-20 23:01:24 +00:00
|
|
|
"encoding/base64"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
2019-06-20 19:14:58 +00:00
|
|
|
"net/url"
|
2019-06-27 17:00:03 +00:00
|
|
|
"sync/atomic"
|
2018-08-16 19:48:23 +00:00
|
|
|
"time"
|
2018-05-20 23:01:24 +00:00
|
|
|
|
2019-06-20 19:14:58 +00:00
|
|
|
raftlib "github.com/hashicorp/raft"
|
2019-04-04 17:02:44 +00:00
|
|
|
"github.com/hashicorp/vault/api"
|
2019-06-20 19:14:58 +00:00
|
|
|
"github.com/hashicorp/vault/helper/namespace"
|
2018-05-20 23:01:24 +00:00
|
|
|
"github.com/hashicorp/vault/helper/xor"
|
2019-08-23 15:53:18 +00:00
|
|
|
"github.com/hashicorp/vault/physical/raft"
|
2018-05-20 23:01:24 +00:00
|
|
|
"github.com/hashicorp/vault/vault"
|
2019-08-23 15:53:18 +00:00
|
|
|
"github.com/mitchellh/go-testing-interface"
|
2018-05-20 23:01:24 +00:00
|
|
|
)
|
|
|
|
|
2019-10-15 04:55:31 +00:00
|
|
|
// GenerateRootKind selects which flavor of operation token
// GenerateRoot/GenerateRootWithError produce against a test cluster.
type GenerateRootKind int

const (
	// GenerateRootRegular generates a regular root token.
	GenerateRootRegular GenerateRootKind = iota
	// GenerateRootDR generates a DR operation token.
	GenerateRootDR
	// GenerateRecovery generates a recovery operation token.
	GenerateRecovery
)
|
|
|
|
|
2018-05-20 23:01:24 +00:00
|
|
|
// Generates a root token on the target cluster.
|
2019-10-15 04:55:31 +00:00
|
|
|
func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string {
|
|
|
|
t.Helper()
|
|
|
|
token, err := GenerateRootWithError(t, cluster, kind)
|
2018-05-20 23:01:24 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
return token
|
|
|
|
}
|
|
|
|
|
2019-10-15 04:55:31 +00:00
|
|
|
func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) {
|
|
|
|
t.Helper()
|
2018-05-20 23:01:24 +00:00
|
|
|
// If recovery keys supported, use those to perform root token generation instead
|
|
|
|
var keys [][]byte
|
|
|
|
if cluster.Cores[0].SealAccess().RecoveryKeySupported() {
|
|
|
|
keys = cluster.RecoveryKeys
|
|
|
|
} else {
|
|
|
|
keys = cluster.BarrierKeys
|
|
|
|
}
|
|
|
|
client := cluster.Cores[0].Client
|
2019-10-15 04:55:31 +00:00
|
|
|
|
|
|
|
var err error
|
|
|
|
var status *api.GenerateRootStatusResponse
|
|
|
|
switch kind {
|
|
|
|
case GenerateRootRegular:
|
|
|
|
status, err = client.Sys().GenerateRootInit("", "")
|
|
|
|
case GenerateRootDR:
|
|
|
|
status, err = client.Sys().GenerateDROperationTokenInit("", "")
|
|
|
|
case GenerateRecovery:
|
|
|
|
status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "")
|
2018-05-20 23:01:24 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
if status.Required > len(keys) {
|
|
|
|
return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys))
|
|
|
|
}
|
|
|
|
|
2018-09-18 03:03:00 +00:00
|
|
|
otp := status.OTP
|
|
|
|
|
2018-05-20 23:01:24 +00:00
|
|
|
for i, key := range keys {
|
|
|
|
if i >= status.Required {
|
|
|
|
break
|
|
|
|
}
|
2019-10-15 04:55:31 +00:00
|
|
|
|
|
|
|
strKey := base64.StdEncoding.EncodeToString(key)
|
|
|
|
switch kind {
|
|
|
|
case GenerateRootRegular:
|
|
|
|
status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce)
|
|
|
|
case GenerateRootDR:
|
|
|
|
status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce)
|
|
|
|
case GenerateRecovery:
|
|
|
|
status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce)
|
2018-05-20 23:01:24 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !status.Complete {
|
|
|
|
return "", errors.New("generate root operation did not end successfully")
|
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
|
|
|
|
tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken)
|
2018-05-20 23:01:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
|
2018-05-20 23:01:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
return string(tokenBytes), nil
|
2018-05-20 23:01:24 +00:00
|
|
|
}
|
2018-08-16 19:48:23 +00:00
|
|
|
|
|
|
|
// RandomWithPrefix is used to generate a unique name with a prefix, for
// randomizing names in acceptance tests
func RandomWithPrefix(name string) string {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return fmt.Sprintf("%s-%d", name, rng.Int())
}
|
2018-09-18 03:03:00 +00:00
|
|
|
|
2019-02-06 02:01:18 +00:00
|
|
|
func EnsureCoresSealed(t testing.T, c *vault.TestCluster) {
|
2018-09-18 03:03:00 +00:00
|
|
|
t.Helper()
|
|
|
|
for _, core := range c.Cores {
|
2019-02-06 02:01:18 +00:00
|
|
|
EnsureCoreSealed(t, core)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-23 19:12:04 +00:00
|
|
|
func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) {
|
|
|
|
t.Helper()
|
2019-02-06 02:01:18 +00:00
|
|
|
core.Seal(t)
|
|
|
|
timeout := time.Now().Add(60 * time.Second)
|
|
|
|
for {
|
|
|
|
if time.Now().After(timeout) {
|
2019-07-23 19:12:04 +00:00
|
|
|
t.Fatal("timeout waiting for core to seal")
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
|
|
|
if core.Core.Sealed() {
|
|
|
|
break
|
2018-09-18 03:03:00 +00:00
|
|
|
}
|
2019-02-06 02:01:18 +00:00
|
|
|
time.Sleep(250 * time.Millisecond)
|
|
|
|
}
|
|
|
|
}
|
2018-09-18 03:03:00 +00:00
|
|
|
|
2019-02-06 02:01:18 +00:00
|
|
|
func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
|
|
|
|
t.Helper()
|
2019-10-18 18:46:00 +00:00
|
|
|
for i, core := range c.Cores {
|
|
|
|
err := AttemptUnsealCore(c, core)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to unseal core %d: %v", i, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-15 00:39:13 +00:00
|
|
|
func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) {
|
|
|
|
t.Helper()
|
|
|
|
err := AttemptUnsealCore(c, core)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to unseal core: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-18 18:46:00 +00:00
|
|
|
func AttemptUnsealCores(c *vault.TestCluster) error {
|
|
|
|
for i, core := range c.Cores {
|
|
|
|
err := AttemptUnsealCore(c, core)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to unseal core %d: %v", i, err)
|
|
|
|
}
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
2019-10-18 18:46:00 +00:00
|
|
|
return nil
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
2019-10-18 18:46:00 +00:00
|
|
|
|
|
|
|
func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error {
|
2019-02-06 02:01:18 +00:00
|
|
|
if !core.Sealed() {
|
2019-10-18 18:46:00 +00:00
|
|
|
return nil
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
|
|
|
|
2019-08-23 19:51:25 +00:00
|
|
|
core.SealAccess().ClearCaches(context.Background())
|
|
|
|
if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
|
2019-10-18 18:46:00 +00:00
|
|
|
return err
|
2019-08-23 19:51:25 +00:00
|
|
|
}
|
|
|
|
|
2019-02-06 02:01:18 +00:00
|
|
|
client := core.Client
|
|
|
|
client.Sys().ResetUnsealProcess()
|
|
|
|
for j := 0; j < len(c.BarrierKeys); j++ {
|
|
|
|
statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j]))
|
|
|
|
if err != nil {
|
|
|
|
// Sometimes when we get here it's already unsealed on its own
|
|
|
|
// and then this fails for DR secondaries so check again
|
|
|
|
if core.Sealed() {
|
2019-10-18 18:46:00 +00:00
|
|
|
return err
|
|
|
|
} else {
|
|
|
|
return nil
|
2018-09-18 03:03:00 +00:00
|
|
|
}
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
|
|
|
if statusResp == nil {
|
2019-10-18 18:46:00 +00:00
|
|
|
return fmt.Errorf("nil status response during unseal")
|
2018-09-18 03:03:00 +00:00
|
|
|
}
|
2019-02-06 02:01:18 +00:00
|
|
|
if !statusResp.Sealed {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if core.Sealed() {
|
2019-10-18 18:46:00 +00:00
|
|
|
return fmt.Errorf("core is still sealed")
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
2019-10-18 18:46:00 +00:00
|
|
|
return nil
|
2019-02-06 02:01:18 +00:00
|
|
|
}
|
|
|
|
|
2019-06-20 19:14:58 +00:00
|
|
|
func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) {
|
2020-02-13 21:18:53 +00:00
|
|
|
deriveStableActiveCore(t, cluster)
|
|
|
|
}
|
|
|
|
|
|
|
|
func DeriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
|
|
|
|
return deriveStableActiveCore(t, cluster)
|
|
|
|
}
|
|
|
|
|
|
|
|
// deriveStableActiveCore finds the current active core and then polls it 30
// times at 200ms intervals, requiring it to remain leader for at least 3
// consecutive seconds before returning it; otherwise the test fails.
func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	activeCore := DeriveActiveCore(t, cluster)
	// minDuration measures continuous leadership: it is reset whenever the
	// node is observed not being leader, so it only fires after 3 unbroken
	// seconds of leadership.
	minDuration := time.NewTimer(3 * time.Second)

	for i := 0; i < 30; i++ {
		leaderResp, err := activeCore.Client.Sys().Leader()
		if err != nil {
			t.Fatal(err)
		}
		if !leaderResp.IsSelf {
			// Leadership moved away; restart the stability window.
			minDuration.Reset(3 * time.Second)
		}
		time.Sleep(200 * time.Millisecond)
	}

	select {
	case <-minDuration.C:
		// Timer already fired: node was stably active for at least 3s.
	default:
		if stopped := minDuration.Stop(); stopped {
			// Stop returned true, so the timer was still pending — the
			// active node changed within the last 3 seconds of polling.
			t.Fatal("unstable active node")
		}
		// Stop returned false: the timer fired between the select and the
		// Stop call, so the value must still be drained.
		// Drain the value
		<-minDuration.C
	}

	return activeCore
}
|
|
|
|
|
2018-09-18 03:03:00 +00:00
|
|
|
func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
leaderResp, err := core.Client.Sys().Leader()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if leaderResp.IsSelf {
|
|
|
|
return core
|
|
|
|
}
|
|
|
|
}
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
}
|
|
|
|
t.Fatal("could not derive the active core")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-04-11 15:12:37 +00:00
|
|
|
func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestClusterCore {
|
|
|
|
cores := make([]*vault.TestClusterCore, 0, 2)
|
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
leaderResp, err := core.Client.Sys().Leader()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if !leaderResp.IsSelf {
|
|
|
|
cores = append(cores, core)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return cores
|
|
|
|
}
|
|
|
|
|
2019-06-20 19:14:58 +00:00
|
|
|
func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) {
|
|
|
|
t.Helper()
|
|
|
|
for i := 0; i < 30; i++ {
|
|
|
|
unsealed := 0
|
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
if !core.Core.Sealed() {
|
|
|
|
unsealed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if unsealed >= n {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
|
2020-05-14 12:31:02 +00:00
|
|
|
t.Fatalf("%d cores were not unsealed", n)
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|
|
|
|
|
2018-09-18 03:03:00 +00:00
|
|
|
func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
|
2019-04-04 17:02:44 +00:00
|
|
|
t.Helper()
|
2019-08-23 19:51:25 +00:00
|
|
|
for i := 0; i < 60; i++ {
|
2018-09-18 03:03:00 +00:00
|
|
|
sealed := 0
|
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
if core.Core.Sealed() {
|
|
|
|
sealed++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if sealed >= n {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Fatalf("%d cores were not sealed", n)
|
|
|
|
}
|
2019-02-19 20:03:02 +00:00
|
|
|
|
|
|
|
func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
|
2019-04-04 17:02:44 +00:00
|
|
|
t.Helper()
|
|
|
|
for i := 0; i < 30; i++ {
|
2019-02-19 20:03:02 +00:00
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
if standby, _ := core.Core.Standby(); !standby {
|
|
|
|
return core
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Fatalf("node did not become active")
|
|
|
|
return nil
|
|
|
|
}
|
2019-04-04 17:02:44 +00:00
|
|
|
|
2020-02-15 00:39:13 +00:00
|
|
|
func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) {
|
|
|
|
t.Helper()
|
|
|
|
for i := 0; i < 30; i++ {
|
|
|
|
if isLeader, _, clusterAddr, _ := core.Core.Leader(); isLeader != true && clusterAddr != "" {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Fatalf("node did not become standby")
|
|
|
|
}
|
|
|
|
|
2019-10-18 18:46:00 +00:00
|
|
|
// RekeyCluster rekeys the cluster's barrier keys (or, with recovery=true,
// its recovery keys) to a fresh 5-share / 3-threshold set and returns the
// new key shares. When rekeying barrier keys on a cluster whose seal
// supports recovery keys, it returns nil.
func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte {
	t.Helper()
	cluster.Logger.Info("rekeying cluster", "recovery", recovery)
	client := cluster.Cores[0].Client

	// Select the barrier- or recovery-key variant of the rekey init API.
	initFunc := client.Sys().RekeyInit
	if recovery {
		initFunc = client.Sys().RekeyRecoveryKeyInit
	}
	init, err := initFunc(&api.RekeyInitRequest{
		SecretShares:    5,
		SecretThreshold: 3,
	})
	if err != nil {
		t.Fatal(err)
	}

	var statusResp *api.RekeyUpdateResponse
	// The rekey is authorized with the existing recovery keys when the seal
	// supports them, otherwise with the existing barrier keys.
	var keys = cluster.BarrierKeys
	if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() {
		keys = cluster.RecoveryKeys
	}

	// Select the matching update API and feed in key shares until the
	// operation reports completion.
	updateFunc := client.Sys().RekeyUpdate
	if recovery {
		updateFunc = client.Sys().RekeyRecoveryKeyUpdate
	}
	for j := 0; j < len(keys); j++ {
		statusResp, err = updateFunc(base64.StdEncoding.EncodeToString(keys[j]), init.Nonce)
		if err != nil {
			t.Fatal(err)
		}
		if statusResp == nil {
			t.Fatal("nil status response during unseal")
		}
		if statusResp.Complete {
			break
		}
	}
	cluster.Logger.Info("cluster rekeyed", "recovery", recovery)

	// Barrier rekey on a recovery-key seal yields no shares to return.
	if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() && !recovery {
		return nil
	}
	if len(statusResp.KeysB64) != 5 {
		t.Fatal("wrong number of keys")
	}

	// Decode the base64 shares back into raw key bytes for the caller.
	newKeys := make([][]byte, 5)
	for i, key := range statusResp.KeysB64 {
		newKeys[i], err = base64.StdEncoding.DecodeString(key)
		if err != nil {
			t.Fatal(err)
		}
	}
	return newKeys
}
|
|
|
|
|
2020-05-14 12:31:02 +00:00
|
|
|
// TestRaftServerAddressProvider is a ServerAddressProvider that uses the
// ClusterAddr() of each node to provide raft addresses.
//
// Note that TestRaftServerAddressProvider should only be used in cases where
// cores that are part of a raft configuration have already had
// startClusterListener() called (via either unsealing or raft joining).
type TestRaftServerAddressProvider struct {
	// Cluster is the test cluster whose cores are searched (by NodeID) when
	// resolving a raft server ID to an address.
	Cluster *vault.TestCluster
}
|
|
|
|
|
|
|
|
func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
|
|
|
|
for _, core := range p.Cluster.Cores {
|
|
|
|
if core.NodeID == string(id) {
|
|
|
|
parsed, err := url.Parse(core.ClusterAddr())
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return raftlib.ServerAddress(parsed.Host), nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return "", errors.New("could not find cluster addr")
|
|
|
|
}
|
|
|
|
|
|
|
|
// RaftClusterJoinNodes joins every non-leader core of the test cluster to
// the raft cluster led by core 0, installing a TestRaftServerAddressProvider
// on each core's raft backend, and waits until all cores are unsealed.
func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
	addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}

	leaderCore := cluster.Cores[0]
	leaderAPI := leaderCore.Client.Address()
	atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)

	// Seal the leader so we can install an address provider
	{
		EnsureCoreSealed(t, leaderCore)
		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leaderCore)
		vault.TestWaitActive(t, leaderCore.Core)
	}

	// Join information pointing the followers at the leader's API address.
	leaderInfo := &raft.LeaderJoinInfo{
		LeaderAPIAddr: leaderAPI,
		TLSConfig:     leaderCore.TLSConfig,
	}

	// Install the address provider and join each remaining core, unsealing
	// it afterwards.
	for i := 1; i < len(cluster.Cores); i++ {
		core := cluster.Cores[i]
		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		leaderInfos := []*raft.LeaderJoinInfo{
			leaderInfo,
		}
		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		if err != nil {
			t.Fatal(err)
		}

		cluster.UnsealCore(t, core)
	}

	WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}
|
2019-06-20 19:14:58 +00:00
|
|
|
|
2020-05-14 12:31:02 +00:00
|
|
|
// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
// It is useful in cases where the raft configuration is known ahead of time,
// but some of the cores have not yet had startClusterListener() called (via
// either unsealing or raft joining), and thus do not yet have a ClusterAddr()
// assigned.
type HardcodedServerAddressProvider struct {
	// Entries maps each raft server ID to its fixed raft address.
	Entries map[raftlib.ServerID]raftlib.ServerAddress
}
|
|
|
|
|
|
|
|
func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
|
|
|
|
if addr, ok := p.Entries[id]; ok {
|
|
|
|
return addr, nil
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|
2020-05-14 12:31:02 +00:00
|
|
|
return "", errors.New("could not find cluster addr")
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewHardcodedServerAddressProvider is a convenience function that makes a
|
|
|
|
// ServerAddressProvider from a given cluster address base port.
|
|
|
|
func NewHardcodedServerAddressProvider(cluster *vault.TestCluster, baseClusterPort int) raftlib.ServerAddressProvider {
|
|
|
|
|
|
|
|
entries := make(map[raftlib.ServerID]raftlib.ServerAddress)
|
|
|
|
|
|
|
|
for i := 0; i < len(cluster.Cores); i++ {
|
|
|
|
id := fmt.Sprintf("core-%d", i)
|
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i)
|
|
|
|
entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &HardcodedServerAddressProvider{
|
|
|
|
entries,
|
|
|
|
}
|
|
|
|
}
|
2019-06-20 19:14:58 +00:00
|
|
|
|
2020-05-14 12:31:02 +00:00
|
|
|
// SetRaftAddressProviders sets a ServerAddressProvider for all the nodes in a
|
|
|
|
// cluster.
|
|
|
|
func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider raftlib.ServerAddressProvider) {
|
|
|
|
|
|
|
|
atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)
|
|
|
|
|
|
|
|
for _, core := range cluster.Cores {
|
|
|
|
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider)
|
|
|
|
}
|
2019-06-20 19:14:58 +00:00
|
|
|
}
|