package testhelpers

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"math/rand"
	"net/url"
	"sync/atomic"
	"time"

	raftlib "github.com/hashicorp/raft"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/helper/xor"
	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/vault"
	"github.com/mitchellh/go-testing-interface"
)

type GenerateRootKind int

const (
	GenerateRootRegular GenerateRootKind = iota
	GenerateRootDR
	GenerateRecovery
)

// GenerateRoot generates a root token on the target cluster.
func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string {
	t.Helper()
	token, err := GenerateRootWithError(t, cluster, kind)
	if err != nil {
		t.Fatal(err)
	}
	return token
}
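
// A minimal usage sketch (not part of the helpers themselves): a test that
// needs a fresh root token would typically call GenerateRoot against an
// already-started test cluster. The cluster construction below is an assumed
// setup; vaulthttp stands for the github.com/hashicorp/vault/http package.
//
//	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
//		HandlerFunc: vaulthttp.Handler,
//	})
//	cluster.Start()
//	defer cluster.Cleanup()
//
//	rootToken := GenerateRoot(t, cluster, GenerateRootRegular)
//	cluster.Cores[0].Client.SetToken(rootToken)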

// GenerateRootWithError drives the root/DR/recovery operation token generation
// workflow on the target cluster, returning the decoded token or an error.
func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) {
	t.Helper()
	// If recovery keys are supported, use those to perform root token generation instead
	var keys [][]byte
	if cluster.Cores[0].SealAccess().RecoveryKeySupported() {
		keys = cluster.RecoveryKeys
	} else {
		keys = cluster.BarrierKeys
	}
	client := cluster.Cores[0].Client

	var err error
	var status *api.GenerateRootStatusResponse
	switch kind {
	case GenerateRootRegular:
		status, err = client.Sys().GenerateRootInit("", "")
	case GenerateRootDR:
		status, err = client.Sys().GenerateDROperationTokenInit("", "")
	case GenerateRecovery:
		status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "")
	}
	if err != nil {
		return "", err
	}

	if status.Required > len(keys) {
		return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys))
	}

	otp := status.OTP

	for i, key := range keys {
		if i >= status.Required {
			break
		}

		strKey := base64.StdEncoding.EncodeToString(key)
		switch kind {
		case GenerateRootRegular:
			status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce)
		case GenerateRootDR:
			status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce)
		case GenerateRecovery:
			status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce)
		}
		if err != nil {
			return "", err
		}
	}
	if !status.Complete {
		return "", errors.New("generate root operation did not end successfully")
	}

	// The returned token is XORed with the OTP; decode it and undo the XOR to
	// recover the plaintext token.
	tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken)
	if err != nil {
		return "", err
	}
	tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
	if err != nil {
		return "", err
	}
	return string(tokenBytes), nil
}

// RandomWithPrefix is used to generate a unique name with a prefix, for
// randomizing names in acceptance tests
func RandomWithPrefix(name string) string {
	return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
}
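
// For example (illustrative only), a test that mounts a throwaway secrets
// engine might derive its path like this so parallel runs do not collide:
//
//	mountPath := RandomWithPrefix("kv-test") // e.g. "kv-test-5577006791947779410"
//	if err := client.Sys().Mount(mountPath, &api.MountInput{Type: "kv"}); err != nil {
//		t.Fatal(err)
//	}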

// EnsureCoresSealed seals every core in the cluster, failing the test if any
// core does not seal.
func EnsureCoresSealed(t testing.T, c *vault.TestCluster) {
	t.Helper()
	for _, core := range c.Cores {
		EnsureCoreSealed(t, core)
	}
}

// EnsureCoreSealed seals a single core and waits up to a minute for it to
// report itself sealed.
func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) {
	t.Helper()
	core.Seal(t)
	timeout := time.Now().Add(60 * time.Second)
	for {
		if time.Now().After(timeout) {
			t.Fatal("timeout waiting for core to seal")
		}
		if core.Core.Sealed() {
			break
		}
		time.Sleep(250 * time.Millisecond)
	}
}

// EnsureCoresUnsealed unseals every core in the cluster, failing the test on
// the first core that cannot be unsealed.
func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
	t.Helper()
	for i, core := range c.Cores {
		err := AttemptUnsealCore(c, core)
		if err != nil {
			t.Fatalf("failed to unseal core %d: %v", i, err)
		}
	}
}

// EnsureCoreUnsealed unseals a single core, failing the test on error.
func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) {
	t.Helper()
	err := AttemptUnsealCore(c, core)
	if err != nil {
		t.Fatalf("failed to unseal core: %v", err)
	}
}
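
// As a sketch of how these pair up in practice (assuming a running test
// cluster), a test that needs to bounce the barrier can seal everything, do
// its work, then bring the cluster back and wait for a leader:
//
//	EnsureCoresSealed(t, cluster)
//	// ... exercise whatever behavior requires a sealed cluster ...
//	EnsureCoresUnsealed(t, cluster)
//	WaitForActiveNode(t, cluster)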

// AttemptUnsealCores tries to unseal every core in the cluster, returning an
// error for the first core that stays sealed.
func AttemptUnsealCores(c *vault.TestCluster) error {
	for i, core := range c.Cores {
		err := AttemptUnsealCore(c, core)
		if err != nil {
			return fmt.Errorf("failed to unseal core %d: %v", i, err)
		}
	}
	return nil
}

// AttemptUnsealCore unseals a single core, first via any stored keys and then
// by submitting the cluster's barrier keys.
func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error {
	if !core.Sealed() {
		return nil
	}

	core.SealAccess().ClearCaches(context.Background())
	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
		return err
	}

	client := core.Client
	client.Sys().ResetUnsealProcess()
	for j := 0; j < len(c.BarrierKeys); j++ {
		statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j]))
		if err != nil {
			// Sometimes when we get here it's already unsealed on its own
			// and then this fails for DR secondaries so check again
			if core.Sealed() {
				return err
			}
			return nil
		}
		if statusResp == nil {
			return fmt.Errorf("nil status response during unseal")
		}
		if !statusResp.Sealed {
			break
		}
	}
	if core.Sealed() {
		return fmt.Errorf("core is still sealed")
	}
	return nil
}

// EnsureStableActiveNode waits until the cluster's active node has held
// leadership continuously for a few seconds.
func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) {
	deriveStableActiveCore(t, cluster)
}

// DeriveStableActiveCore returns the active core once it has held leadership
// continuously for a few seconds.
func DeriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	return deriveStableActiveCore(t, cluster)
}

func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	activeCore := DeriveActiveCore(t, cluster)
	minDuration := time.NewTimer(3 * time.Second)

	// Poll the active core; every time leadership appears to have moved,
	// restart the three-second stability window.
	for i := 0; i < 30; i++ {
		leaderResp, err := activeCore.Client.Sys().Leader()
		if err != nil {
			t.Fatal(err)
		}
		if !leaderResp.IsSelf {
			minDuration.Reset(3 * time.Second)
		}
		time.Sleep(200 * time.Millisecond)
	}

	// If the timer has not fired yet, leadership changed within the last
	// three seconds and the node cannot be considered stable.
	select {
	case <-minDuration.C:
	default:
		if stopped := minDuration.Stop(); stopped {
			t.Fatal("unstable active node")
		}
		// Drain the value
		<-minDuration.C
	}

	return activeCore
}

// DeriveActiveCore returns the cluster's active (leader) core, failing the
// test if none is found within 20 seconds.
func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	for i := 0; i < 20; i++ {
		for _, core := range cluster.Cores {
			leaderResp, err := core.Client.Sys().Leader()
			if err != nil {
				t.Fatal(err)
			}
			if leaderResp.IsSelf {
				return core
			}
		}
		time.Sleep(1 * time.Second)
	}
	t.Fatal("could not derive the active core")
	return nil
}

// DeriveStandbyCores returns every core that does not currently report itself
// as the leader.
func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestClusterCore {
	cores := make([]*vault.TestClusterCore, 0, 2)
	for _, core := range cluster.Cores {
		leaderResp, err := core.Client.Sys().Leader()
		if err != nil {
			t.Fatal(err)
		}
		if !leaderResp.IsSelf {
			cores = append(cores, core)
		}
	}

	return cores
}
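
// A rough sketch of how the derive helpers combine in a failover-style test
// (assuming a running multi-node test cluster): pin down a stable leader,
// force it to step down, then look at where leadership ends up.
//
//	active := DeriveStableActiveCore(t, cluster)
//	if err := active.Client.Sys().StepDown(); err != nil {
//		t.Fatal(err)
//	}
//	newLeader := WaitForActiveNode(t, cluster)
//	standbys := DeriveStandbyCores(t, cluster)
//	t.Logf("leader moved from %s to %s; %d standbys remain",
//		active.NodeID, newLeader.NodeID, len(standbys))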

// WaitForNCoresUnsealed waits up to 30 seconds for at least n cores in the
// cluster to be unsealed.
func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) {
	t.Helper()
	for i := 0; i < 30; i++ {
		unsealed := 0
		for _, core := range cluster.Cores {
			if !core.Core.Sealed() {
				unsealed++
			}
		}

		if unsealed >= n {
			return
		}
		time.Sleep(time.Second)
	}

	t.Fatalf("%d cores were not unsealed", n)
}

// SealCores shuts down every core in the cluster and waits for each one to
// report itself sealed.
func SealCores(t testing.T, cluster *vault.TestCluster) {
	t.Helper()
	for _, core := range cluster.Cores {
		if err := core.Shutdown(); err != nil {
			t.Fatal(err)
		}
		timeout := time.Now().Add(3 * time.Second)
		for {
			if time.Now().After(timeout) {
				t.Fatal("timeout waiting for core to seal")
			}
			if core.Sealed() {
				break
			}
			time.Sleep(100 * time.Millisecond)
		}
	}
}

// WaitForNCoresSealed waits up to a minute for at least n cores in the cluster
// to be sealed.
func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
	t.Helper()
	for i := 0; i < 60; i++ {
		sealed := 0
		for _, core := range cluster.Cores {
			if core.Core.Sealed() {
				sealed++
			}
		}

		if sealed >= n {
			return
		}
		time.Sleep(time.Second)
	}

	t.Fatalf("%d cores were not sealed", n)
}

// WaitForActiveNode waits up to 30 seconds for one of the cluster's cores to
// become active and returns it.
func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	t.Helper()
	for i := 0; i < 30; i++ {
		for _, core := range cluster.Cores {
			if standby, _ := core.Core.Standby(); !standby {
				return core
			}
		}

		time.Sleep(time.Second)
	}

	t.Fatal("node did not become active")
	return nil
}

// WaitForStandbyNode waits up to 30 seconds for the given core to become a
// standby that knows the active node's cluster address.
func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) {
	t.Helper()
	for i := 0; i < 30; i++ {
		if isLeader, _, clusterAddr, _ := core.Core.Leader(); !isLeader && clusterAddr != "" {
			return
		}

		time.Sleep(time.Second)
	}

	t.Fatal("node did not become standby")
}

// RekeyCluster rekeys either the barrier keys or the recovery keys of the
// cluster and returns the new key shares (nil when a barrier rekey is
// performed on a seal that uses recovery keys).
func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte {
	t.Helper()
	cluster.Logger.Info("rekeying cluster", "recovery", recovery)
	client := cluster.Cores[0].Client

	initFunc := client.Sys().RekeyInit
	if recovery {
		initFunc = client.Sys().RekeyRecoveryKeyInit
	}
	init, err := initFunc(&api.RekeyInitRequest{
		SecretShares:    5,
		SecretThreshold: 3,
	})
	if err != nil {
		t.Fatal(err)
	}

	var statusResp *api.RekeyUpdateResponse
	keys := cluster.BarrierKeys
	if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() {
		keys = cluster.RecoveryKeys
	}

	updateFunc := client.Sys().RekeyUpdate
	if recovery {
		updateFunc = client.Sys().RekeyRecoveryKeyUpdate
	}
	for j := 0; j < len(keys); j++ {
		statusResp, err = updateFunc(base64.StdEncoding.EncodeToString(keys[j]), init.Nonce)
		if err != nil {
			t.Fatal(err)
		}
		if statusResp == nil {
			t.Fatal("nil status response during rekey")
		}
		if statusResp.Complete {
			break
		}
	}
	cluster.Logger.Info("cluster rekeyed", "recovery", recovery)

	if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() && !recovery {
		return nil
	}
	if len(statusResp.KeysB64) != 5 {
		t.Fatal("wrong number of keys")
	}

	newKeys := make([][]byte, 5)
	for i, key := range statusResp.KeysB64 {
		newKeys[i], err = base64.StdEncoding.DecodeString(key)
		if err != nil {
			t.Fatal(err)
		}
	}
	return newKeys
}
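
// Callers typically store the returned shares back on the test cluster so
// later helpers keep working; a sketch of a barrier rekey:
//
//	newKeys := RekeyCluster(t, cluster, false)
//	if newKeys != nil {
//		cluster.BarrierKeys = newKeys
//	}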

// TestRaftServerAddressProvider is a ServerAddressProvider that uses the
// ClusterAddr() of each node to provide raft addresses.
//
// Note that TestRaftServerAddressProvider should only be used in cases where
// cores that are part of a raft configuration have already had
// startClusterListener() called (via either unsealing or raft joining).
type TestRaftServerAddressProvider struct {
	Cluster *vault.TestCluster
}

func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
	for _, core := range p.Cluster.Cores {
		if core.NodeID == string(id) {
			parsed, err := url.Parse(core.ClusterAddr())
			if err != nil {
				return "", err
			}

			return raftlib.ServerAddress(parsed.Host), nil
		}
	}

	return "", errors.New("could not find cluster addr")
}

// RaftClusterJoinNodes seals the first core to install an address provider,
// brings it back as the raft leader, then joins and unseals the remaining
// cores.
func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
	addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}

	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)

	leader := cluster.Cores[0]

	// Seal the leader so we can install an address provider
	{
		EnsureCoreSealed(t, leader)
		leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leader)
		vault.TestWaitActive(t, leader.Core)
	}

	leaderInfos := []*raft.LeaderJoinInfo{
		{
			LeaderAPIAddr: leader.Client.Address(),
			TLSConfig:     leader.TLSConfig,
		},
	}

	// Join followers
	for i := 1; i < len(cluster.Cores); i++ {
		core := cluster.Cores[i]
		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		if err != nil {
			t.Fatal(err)
		}

		cluster.UnsealCore(t, core)
	}

	WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}
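
// A sketch of how a raft-backed test typically follows this up: once the
// nodes are joined and unsealed, confirm the configuration on each core.
//
//	RaftClusterJoinNodes(t, cluster)
//	for _, core := range cluster.Cores {
//		if err := VerifyRaftConfiguration(core, len(cluster.Cores)); err != nil {
//			t.Fatal(err)
//		}
//	}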

// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
// It is useful in cases where the raft configuration is known ahead of time,
// but some of the cores have not yet had startClusterListener() called (via
// either unsealing or raft joining), and thus do not yet have a ClusterAddr()
// assigned.
type HardcodedServerAddressProvider struct {
	Entries map[raftlib.ServerID]raftlib.ServerAddress
}

func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
	if addr, ok := p.Entries[id]; ok {
		return addr, nil
	}
	return "", errors.New("could not find cluster addr")
}

// NewHardcodedServerAddressProvider is a convenience function that makes a
// ServerAddressProvider from a given cluster address base port.
func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.ServerAddressProvider {
	entries := make(map[raftlib.ServerID]raftlib.ServerAddress)

	for i := 0; i < numCores; i++ {
		id := fmt.Sprintf("core-%d", i)
		addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i)
		entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr)
	}

	return &HardcodedServerAddressProvider{
		Entries: entries,
	}
}
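
// For illustration, assuming a test cluster whose cluster listeners are laid
// out sequentially from a known base port (the 10000 below is hypothetical),
// the provider maps "core-0" to 127.0.0.1:10000, "core-1" to 127.0.0.1:10001,
// and so on:
//
//	provider := NewHardcodedServerAddressProvider(3, 10000)
//	addr, err := provider.ServerAddr(raftlib.ServerID("core-1"))
//	// addr == "127.0.0.1:10001", err == nil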

// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e.
// the correct number of servers, having the correct NodeIDs, and exactly one
// leader.
func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error {
	backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
	ctx := namespace.RootContext(context.Background())
	config, err := backend.GetConfiguration(ctx)
	if err != nil {
		return err
	}

	servers := config.Servers
	if len(servers) != numCores {
		return fmt.Errorf("Found %d servers, not %d", len(servers), numCores)
	}

	leaders := 0
	for i, s := range servers {
		if s.NodeID != fmt.Sprintf("core-%d", i) {
			return fmt.Errorf("Found unexpected node ID %q", s.NodeID)
		}
		if s.Leader {
			leaders++
		}
	}

	if leaders != 1 {
		return fmt.Errorf("Found %d leaders", leaders)
	}

	return nil
}

// RaftAppliedIndex returns the last applied raft index of the given core.
func RaftAppliedIndex(core *vault.TestClusterCore) uint64 {
	return core.UnderlyingRawStorage.(*raft.RaftBackend).AppliedIndex()
}

// WaitForRaftApply waits up to 30 seconds for the core's raft backend to apply
// at least the given index.
func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) {
	t.Helper()

	backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
	for i := 0; i < 30; i++ {
		if backend.AppliedIndex() >= index {
			return
		}

		time.Sleep(time.Second)
	}

	t.Fatal("node did not apply index")
}
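
// A common pattern (sketched here) is to capture the leader's applied index
// after a batch of writes and then wait for a follower to catch up before
// reading from it; leaderCore and followerCore are placeholder names.
//
//	index := RaftAppliedIndex(leaderCore)    // leaderCore: the active core
//	WaitForRaftApply(t, followerCore, index) // followerCore: a standby core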

// AwaitLeader waits for one of the cluster's nodes to become leader.
func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) {
	timeout := time.Now().Add(30 * time.Second)
	for {
		if time.Now().After(timeout) {
			break
		}

		for i, core := range cluster.Cores {
			if core.Core.Sealed() {
				continue
			}

			isLeader, _, _, _ := core.Leader()
			if isLeader {
				return i, nil
			}
		}

		time.Sleep(time.Second)
	}

	return 0, fmt.Errorf("timeout waiting for leader")
}
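
// Sketch of typical use: find which core is currently leader by index, then
// address that core directly.
//
//	leaderIdx, err := AwaitLeader(t, cluster)
//	if err != nil {
//		t.Fatal(err)
//	}
//	leaderClient := cluster.Cores[leaderIdx].Client
//	// ... issue requests that must hit the active node via leaderClient ...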

// GenerateDebugLogs mounts and unmounts a KV backend once per second to
// generate request traffic until a value is sent on the returned channel; it
// then acknowledges by sending back on the same channel.
func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
	t.Helper()

	stopCh := make(chan struct{})
	ticker := time.NewTicker(time.Second)
	var err error

	go func() {
		for {
			select {
			case <-stopCh:
				ticker.Stop()
				stopCh <- struct{}{}
				return
			case <-ticker.C:
				err = client.Sys().Mount("foo", &api.MountInput{
					Type: "kv",
					Options: map[string]string{
						"version": "1",
					},
				})
				if err != nil {
					t.Fatal(err)
				}

				err = client.Sys().Unmount("foo")
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}()

	return stopCh
}
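
// Because the goroutine above answers on the same channel, a caller stops the
// generator with a send followed by a receive (sketch):
//
//	stopCh := GenerateDebugLogs(t, client)
//	// ... run the part of the test that needs background traffic ...
//	stopCh <- struct{}{} // ask the generator to stop
//	<-stopCh             // wait for it to acknowledge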

// VerifyRaftPeers fails the test unless every node ID in expected appears in
// the raft configuration reported by sys/storage/raft/configuration.
func VerifyRaftPeers(t testing.T, client *api.Client, expected map[string]bool) {
	t.Helper()

	resp, err := client.Logical().Read("sys/storage/raft/configuration")
	if err != nil {
		t.Fatalf("error reading raft config: %v", err)
	}

	if resp == nil || resp.Data == nil {
		t.Fatal("missing response data")
	}

	config, ok := resp.Data["config"].(map[string]interface{})
	if !ok {
		t.Fatal("missing config in response data")
	}

	servers, ok := config["servers"].([]interface{})
	if !ok {
		t.Fatal("missing servers in response data config")
	}

	// Iterate through the servers and remove the node found in the response
	// from the expected collection
	for _, s := range servers {
		server := s.(map[string]interface{})
		delete(expected, server["node_id"].(string))
	}

	// If the collection is non-empty, it means that a peer was not found in
	// the response.
	if len(expected) != 0 {
		t.Fatalf("failed to read configuration successfully, expected peers not found in configuration list: %v", expected)
	}
}
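
// For example (sketch), a three-node raft test would assert its expected peer
// set like this; the map is consumed by the helper, so build a fresh one per
// call:
//
//	VerifyRaftPeers(t, cluster.Cores[0].Client, map[string]bool{
//		"core-0": true,
//		"core-1": true,
//		"core-2": true,
//	})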