package testhelpers

import (
	"encoding/base64"
	"errors"
	"fmt"
	"math/rand"
	"net/http"
	"sync"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/helper/xor"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/inmem"
	"github.com/hashicorp/vault/vault"
	testing "github.com/mitchellh/go-testing-interface"
)

// ReplicatedTestClusters groups the four test clusters used in replication
// tests: a performance primary/secondary pair, each with its own DR secondary.
type ReplicatedTestClusters struct {
	PerfPrimaryCluster     *vault.TestCluster
	PerfSecondaryCluster   *vault.TestCluster
	PerfPrimaryDRCluster   *vault.TestCluster
	PerfSecondaryDRCluster *vault.TestCluster
}

// Cleanup tears down all four clusters.
func (r *ReplicatedTestClusters) Cleanup() {
	r.PerfPrimaryCluster.Cleanup()
	r.PerfSecondaryCluster.Cleanup()
	r.PerfPrimaryDRCluster.Cleanup()
	r.PerfSecondaryDRCluster.Cleanup()
}

// GenerateRoot generates a root token on the target cluster, failing the test
// on error. If drToken is true, a DR operation token is generated instead.
func GenerateRoot(t testing.T, cluster *vault.TestCluster, drToken bool) string {
	token, err := GenerateRootWithError(t, cluster, drToken)
	if err != nil {
		t.Fatal(err)
	}
	return token
}

// GenerateRootWithError is like GenerateRoot but returns the error instead of
// failing the test.
func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool) (string, error) {
	// If recovery keys are supported, use those to perform root token
	// generation instead of the barrier keys.
	var keys [][]byte
	if cluster.Cores[0].SealAccess().RecoveryKeySupported() {
		keys = cluster.RecoveryKeys
	} else {
		keys = cluster.BarrierKeys
	}

	client := cluster.Cores[0].Client
	f := client.Sys().GenerateRootInit
	if drToken {
		f = client.Sys().GenerateDROperationTokenInit
	}
	status, err := f("", "")
	if err != nil {
		return "", err
	}

	if status.Required > len(keys) {
		return "", fmt.Errorf("need more keys than we have: need %d, have %d", status.Required, len(keys))
	}

	otp := status.OTP

	for i, key := range keys {
		if i >= status.Required {
			break
		}
		f := client.Sys().GenerateRootUpdate
		if drToken {
			f = client.Sys().GenerateDROperationTokenUpdate
		}
		status, err = f(base64.StdEncoding.EncodeToString(key), status.Nonce)
		if err != nil {
			return "", err
		}
	}
	if !status.Complete {
		return "", errors.New("generate root operation did not end successfully")
	}

	// The returned token is base64-encoded and XORed with the OTP; reverse
	// both steps to recover the plaintext token.
	tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken)
	if err != nil {
		return "", err
	}
	tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
	if err != nil {
		return "", err
	}
	return string(tokenBytes), nil
}
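
// Example (hypothetical caller, for illustration only): once a DR secondary's
// BarrierKeys have been copied from its primary, as SetupFourClusterReplication
// does below, a DR operation token can be minted against that secondary with
//
//	drOpToken := GenerateRoot(t, perfDRSecondary, true)
//
// whereas drToken=false yields a regular root token, as is done for the perf
// secondary below.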

// RandomWithPrefix is used to generate a unique name with a prefix, for
// randomizing names in acceptance tests.
func RandomWithPrefix(name string) string {
	return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
}
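
// For example (hypothetical output; the suffix is a time-seeded pseudo-random
// non-negative integer): RandomWithPrefix("secret") might return something
// like "secret-5577006791947779410".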

// EnsureCoresUnsealed unseals every sealed core in the cluster using its
// barrier keys, failing the test if any core remains sealed.
func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
	t.Helper()
	for _, core := range c.Cores {
		if !core.Sealed() {
			continue
		}

		client := core.Client
		client.Sys().ResetUnsealProcess()
		for j := 0; j < len(c.BarrierKeys); j++ {
			statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j]))
			if err != nil {
				// Sometimes the core has already unsealed on its own by the
				// time we get here, and the unseal call then fails for DR
				// secondaries, so check the seal status again before failing.
				if core.Sealed() {
					t.Fatal(err)
				}
				break
			}
			if statusResp == nil {
				t.Fatal("nil status response during unseal")
			}
			if !statusResp.Sealed {
				break
			}
		}
		if core.Sealed() {
			t.Fatal("core is still sealed")
		}
	}
}

// WaitForReplicationState polls the core until it reports the given
// replication state, failing the test after a ten-second timeout.
func WaitForReplicationState(t testing.T, c *vault.Core, state consts.ReplicationState) {
	timeout := time.Now().Add(10 * time.Second)
	for {
		if time.Now().After(timeout) {
			t.Fatalf("timeout waiting for core to have state %d", uint32(state))
		}
		// Don't shadow the target state: compare the core's current state
		// against the state we were asked to wait for.
		current := c.ReplicationState()
		if current.HasState(state) {
			break
		}
		time.Sleep(1 * time.Second)
	}
}

// GetClusterAndCore builds a single test cluster backed by a transactional
// in-memory physical backend and an in-memory HA backend, starts it, waits
// for a core to become active, and returns the cluster and that core.
func GetClusterAndCore(t testing.T, logger log.Logger, handlerFunc func(*vault.HandlerProperties) http.Handler) (*vault.TestCluster, *vault.TestClusterCore) {
	inm, err := inmem.NewTransactionalInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	inmha, err := inmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}

	coreConfig := &vault.CoreConfig{
		Physical:   inm,
		HAPhysical: inmha.(physical.HABackend),
	}

	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		HandlerFunc: handlerFunc,
		Logger:      logger,
	})
	cluster.Start()

	cores := cluster.Cores
	core := cores[0]

	vault.TestWaitActive(t, core.Core)

	return cluster, core
}

// GetFourReplicatedClusters builds a perf primary, a perf secondary, and a DR
// secondary for each, wires up replication between them, and returns the set
// with all cores unsealed.
func GetFourReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerProperties) http.Handler) *ReplicatedTestClusters {
	ret := &ReplicatedTestClusters{}

	logger := log.New(&log.LoggerOptions{
		Mutex: &sync.Mutex{},
		Level: log.Trace,
	})
	// Set this lower so that state populates quickly to standby nodes
	vault.HeartbeatInterval = 2 * time.Second

	ret.PerfPrimaryCluster, _ = GetClusterAndCore(t, logger.Named("perf-pri"), handlerFunc)

	ret.PerfSecondaryCluster, _ = GetClusterAndCore(t, logger.Named("perf-sec"), handlerFunc)

	ret.PerfPrimaryDRCluster, _ = GetClusterAndCore(t, logger.Named("perf-pri-dr"), handlerFunc)

	ret.PerfSecondaryDRCluster, _ = GetClusterAndCore(t, logger.Named("perf-sec-dr"), handlerFunc)

	SetupFourClusterReplication(t, ret.PerfPrimaryCluster, ret.PerfSecondaryCluster, ret.PerfPrimaryDRCluster, ret.PerfSecondaryDRCluster)

	// Wait until poison pills have been read
	time.Sleep(45 * time.Second)
	EnsureCoresUnsealed(t, ret.PerfPrimaryCluster)
	EnsureCoresUnsealed(t, ret.PerfSecondaryCluster)
	EnsureCoresUnsealed(t, ret.PerfPrimaryDRCluster)
	EnsureCoresUnsealed(t, ret.PerfSecondaryDRCluster)

	return ret
}
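
// Example usage (hypothetical test, not part of this package). The handler
// constructor is assumed to be Vault's HTTP handler, e.g. imported as
// vaulthttp "github.com/hashicorp/vault/http":
//
//	func TestReplicationSomething(t *testing.T) {
//		clusters := GetFourReplicatedClusters(t, vaulthttp.Handler)
//		defer clusters.Cleanup()
//
//		client := clusters.PerfPrimaryCluster.Cores[0].Client
//		// ... exercise replication through client ...
//	}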

// SetupFourClusterReplication enables DR and performance replication across
// the four clusters: perfPrimary replicates to perfSecondary and to
// perfDRSecondary, and perfSecondary replicates to perfSecondaryDRSecondary.
func SetupFourClusterReplication(t testing.T, perfPrimary, perfSecondary, perfDRSecondary, perfSecondaryDRSecondary *vault.TestCluster) {
	// Enable DR primary
	_, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil)
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfPrimary.Cores[0].Core, consts.ReplicationDRPrimary)

	// Enable performance primary
	_, err = perfPrimary.Cores[0].Client.Logical().Write("sys/replication/performance/primary/enable", nil)
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfPrimary.Cores[0].Core, consts.ReplicationPerformancePrimary)

	// Get a DR secondary token from the perf primary
	secret, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/secondary-token", map[string]interface{}{
		"id": "1",
	})
	if err != nil {
		t.Fatal(err)
	}
	token := secret.WrapInfo.Token

	// Enable the DR secondary of the perf primary
	secret, err = perfDRSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/secondary/enable", map[string]interface{}{
		"token":   token,
		"ca_file": perfPrimary.CACertPEMFile,
	})
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfDRSecondary.Cores[0].Core, consts.ReplicationDRSecondary)
	perfDRSecondary.BarrierKeys = perfPrimary.BarrierKeys
	EnsureCoresUnsealed(t, perfDRSecondary)

	// Get a performance secondary token
	secret, err = perfPrimary.Cores[0].Client.Logical().Write("sys/replication/performance/primary/secondary-token", map[string]interface{}{
		"id": "1",
	})
	if err != nil {
		t.Fatal(err)
	}

	token = secret.WrapInfo.Token

	// Enable the performance secondary
	secret, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/performance/secondary/enable", map[string]interface{}{
		"token":   token,
		"ca_file": perfPrimary.CACertPEMFile,
	})
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfSecondary.Cores[0].Core, consts.ReplicationPerformanceSecondary)
	time.Sleep(time.Second * 3)
	perfSecondary.BarrierKeys = perfPrimary.BarrierKeys

	EnsureCoresUnsealed(t, perfSecondary)
	rootToken := GenerateRoot(t, perfSecondary, false)
	perfSecondary.Cores[0].Client.SetToken(rootToken)

	// Enable DR primary on the perf secondary
	_, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil)
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfSecondary.Cores[0].Core, consts.ReplicationDRPrimary)

	// Get a DR secondary token from the perf secondary
	secret, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/secondary-token", map[string]interface{}{
		"id": "1",
	})
	if err != nil {
		t.Fatal(err)
	}

	token = secret.WrapInfo.Token

	// Enable the DR secondary of the perf secondary
	secret, err = perfSecondaryDRSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/secondary/enable", map[string]interface{}{
		"token":   token,
		"ca_file": perfSecondary.CACertPEMFile,
	})
	if err != nil {
		t.Fatal(err)
	}

	WaitForReplicationState(t, perfSecondaryDRSecondary.Cores[0].Core, consts.ReplicationDRSecondary)
	perfSecondaryDRSecondary.BarrierKeys = perfPrimary.BarrierKeys
	EnsureCoresUnsealed(t, perfSecondaryDRSecondary)

	perfDRSecondary.Cores[0].Client.SetToken(perfPrimary.Cores[0].Client.Token())
	perfSecondaryDRSecondary.Cores[0].Client.SetToken(rootToken)
}

// DeriveActiveCore returns the cluster core that currently reports itself as
// the leader, retrying for up to ten seconds before failing the test.
func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
	for i := 0; i < 10; i++ {
		for _, core := range cluster.Cores {
			leaderResp, err := core.Client.Sys().Leader()
			if err != nil {
				t.Fatal(err)
			}
			if leaderResp.IsSelf {
				return core
			}
		}
		time.Sleep(1 * time.Second)
	}
	t.Fatal("could not derive the active core")
	return nil
}

// WaitForNCoresSealed polls the cluster until at least n cores report sealed,
// failing the test after roughly ten seconds.
func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
	for i := 0; i < 10; i++ {
		sealed := 0
		for _, core := range cluster.Cores {
			if core.Core.Sealed() {
				sealed++
			}
		}

		if sealed >= n {
			return
		}
		time.Sleep(time.Second)
	}

	t.Fatalf("%d cores were not sealed", n)
}
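
// Example (hypothetical, for illustration only): a failover-style test might
// locate the active node, seal it through its API client, and then wait for
// the cluster to report one sealed core:
//
//	active := DeriveActiveCore(t, cluster)
//	if err := active.Client.Sys().Seal(); err != nil {
//		t.Fatal(err)
//	}
//	WaitForNCoresSealed(t, cluster, 1)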