Test pre-1.4 seal migration (#9085)

* enable seal wrap in all seal migration tests

* move adjustForSealMigration to vault package

* fix adjustForSealMigration

* begin working on new seal migration test

* create shamir seal migration test

* refactor testhelpers

* add VerifyRaftConfiguration to testhelpers

* stub out TestTransit

* Revert "refactor testhelpers"

This reverts commit 39593defd0d4c6fd79aedfd37df6298391abb9db.

* get shamir test working again

* stub out transit join

* work on transit join

* Revert "move resuable storage test to avoid creating import cycle"

This reverts commit b3ff2317381a5af12a53117f87d1c6fbb093af6b.

* remove debug code

* initTransit now works with raft join

* runTransit works with inmem

* work on runTransit with raft

* runTransit works with raft

* get rid of disused test

* cleanup tests

* TestSealMigration_TransitToShamir_Pre14

* TestSealMigration_ShamirToTransit_Pre14

* split for pre-1.4 testing

* add simple tests for transit and shamir

* fix typo in test suite

* debug wrapper type

* test debug

* test-debug

* refactor core migration

* Revert "refactor core migration"

This reverts commit a776452d32a9dca7a51e3df4a76b9234d8c0c7ce.

* begin refactor of adjustForSealMigration

* fix bug in adjustForSealMigration

* clean up tests

* clean up core refactoring

* fix bug in shamir->transit migration

* remove unnecessary lock from setSealsForMigration()

* rename sealmigration test package

* use ephemeral ports below 30000

* simplify use of numTestCores
Mike Jarmy 2020-06-11 15:07:59 -04:00 committed by GitHub
parent 7a2eabde69
commit 4303790aae
9 changed files with 880 additions and 1111 deletions
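For orientation before the file diffs: the flow exercised by the pre-1.4 tests is the standard unseal-with-migrate sequence against a restarted cluster. A minimal sketch using the public API client follows; the client defaults, the key slice, and the helper names are placeholders for illustration, not code introduced by this commit.

package main
import (
"log"
"github.com/hashicorp/vault/api"
)
// Placeholder: base64-encoded unseal (or recovery) key shares for the cluster.
var unsealKeysB64 []string
func migrateUnseal() error {
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
return err
}
for _, key := range unsealKeysB64 {
// While a seal migration is pending, plain unseal calls are rejected;
// the request must carry the migrate flag.
resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
Key: key,
Migrate: true,
})
if err != nil {
return err
}
if !resp.Sealed {
break
}
}
return nil
}
func main() {
if err := migrateUnseal(); err != nil {
log.Fatal(err)
}
}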


@@ -1,750 +0,0 @@
package command
import (
"context"
"encoding/base64"
"testing"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/vault"
vaultseal "github.com/hashicorp/vault/vault/seal"
)
func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
t.Helper()
if cfg.Type != sealType {
t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
}
if cfg.SecretShares != shares {
t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
}
if cfg.SecretThreshold != threshold {
t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
}
if cfg.StoredShares != stored {
t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
}
}
func TestSealMigration_ShamirToTransit(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTransit(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTransit(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTransit(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTransit(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationShamirToTransit(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create a cluster that uses shamir.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and unseal it using the shamir keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
SecretShares: 5,
SecretThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
var resp *api.SealStatusResponse
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer tcluster.Cleanup()
tcluster.MakeKey(t, "key1")
transitSeal := tcluster.MakeSeal(t, "key1")
// Transition to transit seal.
if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, transitSeal, nil); err != nil {
t.Fatal(err)
}
// Unseal and migrate to transit.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Seal the transit cluster; we expect the unseal of our main cluster
// to fail as a result.
tcluster.EnsureCoresSealed(t)
// Verify that we cannot unseal. Now the barrier unseal keys are actually
// the recovery keys.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
break
}
if resp == nil || !resp.Sealed {
break
}
}
if err == nil || resp != nil {
t.Fatalf("expected sealed state; got %#v", resp)
}
// Unseal the transit server; we expect the unseal to work now on our main
// cluster.
tcluster.UnsealCores(t)
// Verify that we can unseal.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
}
func TestSealMigration_ShamirToTestSeal(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTestSeal(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTestSeal(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTestSeal(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToTestSeal(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationShamirToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create a cluster that uses shamir.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and unseal it using the shamir keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
SecretShares: 5,
SecretThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
var resp *api.SealStatusResponse
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create a test seal
testSeal := vault.NewAutoSeal(vaultseal.NewTestSeal(&vaultseal.TestSealOpts{}))
// Transition to test seal.
if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, testSeal, nil); err != nil {
t.Fatal(err)
}
// Unseal and migrate to test seal.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Verify that we can unseal.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
}
func TestSealMigration_TransitToTestSeal(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer func() {
if tcluster != nil {
tcluster.Cleanup()
}
}()
tcluster.MakeKey(t, "key1")
var transitSeal vault.Seal
// Create a cluster that uses transit.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
transitSeal = tcluster.MakeSeal(t, "key1")
return transitSeal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and fetch the recovery keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create a test seal
testSeal := vault.NewAutoSeal(vaultseal.NewTestSeal(&vaultseal.TestSealOpts{}))
// Transition to test seal.
if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, testSeal, transitSeal); err != nil {
t.Fatal(err)
}
// Unseal and migrate to Test Seal.
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Unseal the cluster. Now the recovery keys are actually the barrier
// unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
// Now that migration is done, we can stop the transit cluster, since we
// can seal/unseal without it.
tcluster.Cleanup()
tcluster = nil
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
}
func TestSealMigration_TransitToShamir(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer func() {
if tcluster != nil {
tcluster.Cleanup()
}
}()
tcluster.MakeKey(t, "key1")
var transitSeal vault.Seal
// Create a cluster that uses transit.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
transitSeal = tcluster.MakeSeal(t, "key1")
return transitSeal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and fetch the recovery keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create a Shamir seal.
logger := cluster.Logger.Named("shamir")
shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: logger,
}),
})
// Transition to Shamir seal.
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
t.Fatal(err)
}
// Unseal and migrate to Shamir.
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Unseal the cluster. Now the recovery keys are actually the barrier
// unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
// Now that migration is done, we can stop the transit cluster, since we
// can seal/unseal without it.
tcluster.Cleanup()
tcluster = nil
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
}
/*
func TestSealMigration_TransitToTransit(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTransit(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTransit(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTransit(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTransit(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToTransit(t *testing.T, setup teststorage.ClusterSetupMutator) {
tcluster := sealhelper.NewTransitSealServer(t)
defer tcluster.Cleanup()
tcluster.MakeKey(t, "key1")
tcluster.MakeKey(t, "key2")
var seals []vault.Seal
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
tseal := tcluster.MakeSeal(t, "key1")
seals = append(seals, tseal)
return tseal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
rootToken := initResp.RootToken
client.SetToken(rootToken)
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
logger := cluster.Logger.Named("shamir")
autoSeal2 := tcluster.MakeSeal(t, "key2")
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, autoSeal2, seals[0]); err != nil {
t.Fatal(err)
}
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal and unseal again to verify that things are working fine
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Delete the original seal's transit key.
_, err = tcluster.Cores[0].Client.Logical().Delete(path.Join("transit", "keys", "key1"))
if err != nil {
t.Fatal(err)
}
err = cluster.Cores[0].Core.UnsealWithStoredKeys(context.Background())
if err != nil {
t.Fatal(err)
}
}
*/


@@ -1120,6 +1120,7 @@ func (c *ServerCommand) Run(args []string) int {
HAPhysical: nil,
ServiceRegistration: configSR,
Seal: barrierSeal,
UnwrapSeal: unwrapSeal,
AuditBackends: c.AuditBackends,
CredentialBackends: c.CredentialBackends,
LogicalBackends: c.LogicalBackends,
@@ -1528,12 +1529,6 @@ CLUSTER_SYNTHESIS_COMPLETE:
Core: core,
}))
// Before unsealing with stored keys, setup seal migration if needed
if err := adjustCoreForSealMigration(c.logger, core, barrierSeal, unwrapSeal); err != nil {
c.UI.Error(err.Error())
return 1
}
// Attempt unsealing in a background goroutine. This is needed for when a
// Vault cluster with multiple servers is configured with auto-unseal but is
// uninitialized. Once one server initializes the storage backend, this
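Condensed sketch of what these two hunks amount to (not the literal file contents): the server command now passes both seals into the core config and relies on vault.NewCore to perform the seal-migration adjustment, instead of calling adjustCoreForSealMigration itself.

coreConfig := &vault.CoreConfig{
Seal: barrierSeal, // seal from the active "seal" stanza
UnwrapSeal: unwrapSeal, // seal from a disabled "seal" stanza, if migrating
// ... remaining fields as in the real server setup ...
}
// NewCore runs adjustForSealMigration internally now, so a bad migration
// configuration surfaces as an error here rather than in the command package.
core, err := vault.NewCore(coreConfig)
if err != nil {
c.UI.Error(err.Error())
return 1
}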


@@ -1,16 +1,8 @@
package command
import (
"context"
"fmt"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
vaultseal "github.com/hashicorp/vault/vault/seal"
"github.com/pkg/errors"
)
var (
@@ -19,106 +11,3 @@ var (
func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) {
}
func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error {
existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
if err != nil {
return fmt.Errorf("Error checking for existing seal: %s", err)
}
// If we don't have an existing config or if it's the deprecated auto seal
// which needs an upgrade, skip out
if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
return nil
}
if unwrapSeal == nil {
// We have the same barrier type and the unwrap seal is nil so we're not
// migrating from same to same, IOW we assume it's not a migration
if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
return nil
}
// If we're not coming from Shamir, and the existing type doesn't match
// the barrier type, we need both the migration seal and the new seal
if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
}
} else {
if unwrapSeal.BarrierType() == wrapping.Shamir {
return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
}
}
if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
return errors.New(`Recovery seal configuration not found for existing seal`)
}
var migrationSeal vault.Seal
var newSeal vault.Seal
// Determine the migrationSeal. This is either going to be an instance of
// shamir or the unwrapSeal.
switch existBarrierSealConfig.Type {
case wrapping.Shamir:
// The value reflected in config is what we're going to
migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
default:
// If we're not coming from Shamir we expect the previous seal to be
// in the config and disabled.
migrationSeal = unwrapSeal
}
// newSeal will be the barrierSeal
newSeal = barrierSeal
if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
return errors.New("Migrating between same seal types is currently not supported")
}
if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
// In this case our migration seal is set so we are using it
// (potentially) for unwrapping. Set it on core for that purpose then
// exit.
core.SetSealsForMigration(nil, nil, unwrapSeal)
return nil
}
// Set the appropriate barrier and recovery configs.
switch {
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
// Migrating from auto->auto, copy the configs over
newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
// Migrating from auto->shamir, clone auto's recovery config and set
// stored keys to 1.
newSealConfig := existRecoverySealConfig.Clone()
newSealConfig.StoredShares = 1
newSeal.SetCachedBarrierConfig(newSealConfig)
case newSeal != nil && newSeal.RecoveryKeySupported():
// Migrating from shamir->auto, set a new barrier config and set
// recovery config to a clone of shamir's barrier config with stored
// keys set to 0.
newBarrierSealConfig := &vault.SealConfig{
Type: newSeal.BarrierType(),
SecretShares: 1,
SecretThreshold: 1,
StoredShares: 1,
}
newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
newRecoveryConfig := existBarrierSealConfig.Clone()
newRecoveryConfig.StoredShares = 0
newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
}
core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal)
return nil
}


@@ -412,42 +412,79 @@ func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib
}
func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
raftClusterJoinNodes(t, cluster, false)
}
func RaftClusterJoinNodesWithStoredKeys(t testing.T, cluster *vault.TestCluster) {
raftClusterJoinNodes(t, cluster, true)
}
func raftClusterJoinNodes(t testing.T, cluster *vault.TestCluster, useStoredKeys bool) {
addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
atomic.StoreUint32(&vault.UpdateClusterAddrForTests, 1)
leader := cluster.Cores[0]
// Seal the leader so we can install an address provider
{
EnsureCoreSealed(t, leader)
leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
if useStoredKeys {
cluster.UnsealCoreWithStoredKeys(t, leader)
} else {
cluster.UnsealCore(t, leader)
}
vault.TestWaitActive(t, leader.Core)
}
leaderInfos := []*raft.LeaderJoinInfo{
&raft.LeaderJoinInfo{
LeaderAPIAddr: leader.Client.Address(),
TLSConfig: leader.TLSConfig,
},
}
// Join followers
for i := 1; i < len(cluster.Cores); i++ {
core := cluster.Cores[i]
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
if err != nil {
t.Fatal(err)
}
if useStoredKeys {
// For autounseal, the raft backend is not initialized right away
// after the join. We need to wait briefly before we can unseal.
awaitUnsealWithStoredKeys(t, core)
} else {
cluster.UnsealCore(t, core)
}
}
WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
}
func awaitUnsealWithStoredKeys(t testing.T, core *vault.TestClusterCore) {
timeout := time.Now().Add(30 * time.Second)
for {
if time.Now().After(timeout) {
t.Fatal("raft join: timeout waiting for core to unseal")
}
// It's actually ok for an error to happen here the first couple of
// times -- it means the raft join hasn't gotten around to initializing
// the backend yet.
err := core.UnsealWithStoredKeys(context.Background())
if err == nil {
return
}
core.Logger().Warn("raft join: failed to unseal core", "error", err)
time.Sleep(time.Second)
}
}
// HardcodedServerAddressProvider is a ServerAddressProvider that uses
// a hardcoded map of raft node addresses.
//
@@ -494,6 +531,40 @@ func SetRaftAddressProviders(t testing.T, cluster *vault.TestCluster, provider r
}
}
// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e.
// the correct number of servers, having the correct NodeIDs, and exactly one
// leader.
func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error {
backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
ctx := namespace.RootContext(context.Background())
config, err := backend.GetConfiguration(ctx)
if err != nil {
return err
}
servers := config.Servers
if len(servers) != numCores {
return fmt.Errorf("Found %d servers, not %d", len(servers), numCores)
}
leaders := 0
for i, s := range servers {
if s.NodeID != fmt.Sprintf("core-%d", i) {
return fmt.Errorf("Found unexpected node ID %q", s.NodeID)
}
if s.Leader {
leaders++
}
}
if leaders != 1 {
return fmt.Errorf("Found %d leaders", leaders)
}
return nil
}
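// Hypothetical helper, shown only as an illustration and not part of this
// change: retry VerifyRaftConfiguration until the raft cluster settles,
// rather than sleeping for a fixed interval in the caller.
func awaitRaftConfiguration(t testing.T, core *vault.TestClusterCore, numCores int) {
deadline := time.Now().Add(30 * time.Second)
for {
err := VerifyRaftConfiguration(core, numCores)
if err == nil {
return
}
if time.Now().After(deadline) {
t.Fatalf("raft configuration never settled: %v", err)
}
time.Sleep(time.Second)
}
}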
func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
t.Helper()


@@ -1,220 +0,0 @@
package teststorage
import (
"context"
"fmt"
"testing"
"time"
"github.com/go-test/deep"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/testhelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/vault"
)
const numTestCores = 5
func TestReusableStorage(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name())
t.Run("inmem", func(t *testing.T) {
t.Parallel()
logger := logger.Named("inmem")
storage, cleanup := MakeReusableStorage(
t, logger, MakeInmemBackend(t, logger))
defer cleanup()
testReusableStorage(t, logger, storage, 51000)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
logger := logger.Named("file")
storage, cleanup := MakeReusableStorage(
t, logger, MakeFileBackend(t, logger))
defer cleanup()
testReusableStorage(t, logger, storage, 52000)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
logger := logger.Named("consul")
storage, cleanup := MakeReusableStorage(
t, logger, MakeConsulBackend(t, logger))
defer cleanup()
testReusableStorage(t, logger, storage, 53000)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
logger := logger.Named("raft")
storage, cleanup := MakeReusableRaftStorage(t, logger, numTestCores)
defer cleanup()
testReusableStorage(t, logger, storage, 54000)
})
}
func testReusableStorage(
t *testing.T, logger hclog.Logger,
storage ReusableStorage, basePort int) {
rootToken, keys := initializeStorage(t, logger, storage, basePort)
reuseStorage(t, logger, storage, basePort, rootToken, keys)
}
// initializeStorage initializes a brand new backend storage.
func initializeStorage(
t *testing.T, logger hclog.Logger,
storage ReusableStorage, basePort int) (string, [][]byte) {
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("initializeStorage"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
if storage.IsRaft {
// Join raft cluster
testhelpers.RaftClusterJoinNodes(t, cluster)
time.Sleep(15 * time.Second)
verifyRaftConfiguration(t, leader)
} else {
// Unseal
cluster.UnsealCores(t)
}
// Wait until unsealed
testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores)
// Write a secret that we will read back out later.
_, err := client.Logical().Write(
"secret/foo",
map[string]interface{}{"zork": "quux"})
if err != nil {
t.Fatal(err)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
return cluster.RootToken, cluster.BarrierKeys
}
// reuseStorage uses a pre-populated backend storage.
func reuseStorage(
t *testing.T, logger hclog.Logger,
storage ReusableStorage, basePort int,
rootToken string, keys [][]byte) {
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("reuseStorage"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
client.SetToken(rootToken)
cluster.BarrierKeys = keys
if storage.IsRaft {
// Set hardcoded Raft address providers
provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
testhelpers.SetRaftAddressProviders(t, cluster, provider)
// Unseal cores
for _, core := range cluster.Cores {
cluster.UnsealCore(t, core)
}
time.Sleep(15 * time.Second)
verifyRaftConfiguration(t, leader)
} else {
// Unseal
cluster.UnsealCores(t)
}
// Wait until unsealed
testhelpers.WaitForNCoresUnsealed(t, cluster, numTestCores)
// Read the secret
secret, err := client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
}
func verifyRaftConfiguration(t *testing.T, core *vault.TestClusterCore) {
backend := core.UnderlyingRawStorage.(*raft.RaftBackend)
ctx := namespace.RootContext(context.Background())
config, err := backend.GetConfiguration(ctx)
if err != nil {
t.Fatal(err)
}
servers := config.Servers
if len(servers) != numTestCores {
t.Fatalf("Found %d servers, not %d", len(servers), numTestCores)
}
leaders := 0
for i, s := range servers {
if diff := deep.Equal(s.NodeID, fmt.Sprintf("core-%d", i)); len(diff) > 0 {
t.Fatal(diff)
}
if s.Leader {
leaders++
}
}
if leaders != 1 {
t.Fatalf("Found %d leaders, not 1", leaders)
}
}


@@ -546,7 +546,8 @@ type CoreConfig struct {
ServiceRegistration sr.ServiceRegistration
Seal Seal
UnwrapSeal Seal
SecureRandomReader io.Reader
@@ -942,6 +943,11 @@ func NewCore(conf *CoreConfig) (*Core, error) {
c.clusterListener.Store((*cluster.Listener)(nil))
err = c.adjustForSealMigration(conf.UnwrapSeal)
if err != nil {
return nil, err
}
return c, nil
}
@@ -2224,9 +2230,113 @@ func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfi
return barrierConf, recoveryConf, nil
}
func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) {
c.stateLock.Lock()
defer c.stateLock.Unlock()
func (c *Core) adjustForSealMigration(unwrapSeal Seal) error {
barrierSeal := c.seal
existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(context.Background())
if err != nil {
return fmt.Errorf("Error checking for existing seal: %s", err)
}
// If we don't have an existing config or if it's the deprecated auto seal
// which needs an upgrade, skip out
if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
return nil
}
if unwrapSeal == nil {
// We have the same barrier type and the unwrap seal is nil so we're not
// migrating from same to same, IOW we assume it's not a migration
if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
return nil
}
// If we're not coming from Shamir, and the existing type doesn't match
// the barrier type, we need both the migration seal and the new seal
if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
}
} else {
if unwrapSeal.BarrierType() == wrapping.Shamir {
return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
}
}
if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
return errors.New(`Recovery seal configuration not found for existing seal`)
}
var migrationSeal Seal
var newSeal Seal
// Determine the migrationSeal. This is either going to be an instance of
// shamir or the unwrapSeal.
switch existBarrierSealConfig.Type {
case wrapping.Shamir:
// The value reflected in config is what we're going to
migrationSeal = NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("shamir"),
}),
})
default:
// If we're not coming from Shamir we expect the previous seal to be
// in the config and disabled.
migrationSeal = unwrapSeal
}
// newSeal will be the barrierSeal
newSeal = barrierSeal
if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
return errors.New("Migrating between same seal types is currently not supported")
}
if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
// In this case our migration seal is set so we are using it
// (potentially) for unwrapping. Set it on core for that purpose then
// exit.
c.setSealsForMigration(nil, nil, unwrapSeal)
return nil
}
// Set the appropriate barrier and recovery configs.
switch {
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
// Migrating from auto->auto, copy the configs over
newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
// Migrating from auto->shamir, clone auto's recovery config and set
// stored keys to 1.
newSealConfig := existRecoverySealConfig.Clone()
newSealConfig.StoredShares = 1
newSeal.SetCachedBarrierConfig(newSealConfig)
case newSeal != nil && newSeal.RecoveryKeySupported():
// Migrating from shamir->auto, set a new barrier config and set
// recovery config to a clone of shamir's barrier config with stored
// keys set to 0.
newBarrierSealConfig := &SealConfig{
Type: newSeal.BarrierType(),
SecretShares: 1,
SecretThreshold: 1,
StoredShares: 1,
}
newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
newRecoveryConfig := existBarrierSealConfig.Clone()
newRecoveryConfig.StoredShares = 0
newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
}
c.setSealsForMigration(migrationSeal, newSeal, unwrapSeal)
return nil
}
func (c *Core) setSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) {
c.unwrapSeal = unwrapSeal
if c.unwrapSeal != nil {
c.unwrapSeal.SetCore(c)


@@ -0,0 +1,134 @@
// +build !enterprise
package sealmigration
import (
"context"
"fmt"
"testing"
"time"
"github.com/go-test/deep"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/vault"
)
// TestSealMigration_TransitToShamir_Pre14 tests transit-to-shamir seal
// migration, using the pre-1.4 method of bringing down the whole cluster to do
// the migration.
func TestSealMigration_TransitToShamir_Pre14(t *testing.T) {
// Note that we do not test integrated raft storage since this is
// a pre-1.4 test.
testVariousBackends(t, testSealMigrationTransitToShamir_Pre14, false)
}
func testSealMigrationTransitToShamir_Pre14(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) {
// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
defer func() {
if tss != nil {
tss.Cleanup()
}
}()
tss.MakeKey(t, "transit-seal-key")
// Initialize the backend with transit.
rootToken, recoveryKeys, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
// Migrate the backend from transit to shamir
migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys)
// Now that migration is done, we can nuke the transit server, since we
// can unseal without it.
tss.Cleanup()
tss = nil
// Run the backend with shamir. Note that the recovery keys are now the
// barrier keys.
runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
}
func migrateFromTransitToShamir_Pre14(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int,
tss *sealhelper.TransitSealServer, transitSeal vault.Seal,
rootToken string, recoveryKeys [][]byte) {
var baseClusterPort = basePort + 10
var conf = vault.CoreConfig{
Logger: logger.Named("migrateFromTransitToShamir"),
// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
// equivalent of doing the following in HCL:
// seal "transit" {
// // ...
// disabled = "true"
// }
UnwrapSeal: transitSeal,
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
client.SetToken(rootToken)
// Attempt to unseal while the transit server is unreachable. Although
// we're unsealing using the recovery keys, this is still an
// autounseal, so it should fail.
tss.EnsureCoresSealed(t)
unsealMigrate(t, client, recoveryKeys, false)
tss.UnsealCores(t)
testhelpers.WaitForActiveNode(t, tss.TestCluster)
// Unseal and migrate to Shamir. Although we're unsealing using the
// recovery keys, this is still an autounseal.
unsealMigrate(t, client, recoveryKeys, true)
testhelpers.WaitForActiveNode(t, cluster)
// Wait for migration to finish. Sadly there is no callback, and the
// test will fail later on if we don't do this.
time.Sleep(10 * time.Second)
// Read the secret
secret, err := client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
cluster.EnsureCoresSealed(t)
}
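Both migration helpers above wait out the migration with a fixed ten-second sleep because there is no completion callback. A rough alternative is sketched below; the helper name is invented, and it assumes the seal-status response exposes a Migration flag, so treat it as an illustration rather than something this change relies on.

func awaitMigrationComplete(t *testing.T, client *api.Client) {
deadline := time.Now().Add(60 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatal("timeout waiting for seal migration to complete")
}
resp, err := client.Sys().SealStatus()
if err == nil && !resp.Sealed && !resp.Migration {
return
}
time.Sleep(time.Second)
}
}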


@@ -0,0 +1,517 @@
package sealmigration
import (
"context"
"encoding/base64"
"fmt"
"testing"
"time"
"github.com/go-test/deep"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/vault"
)
const (
numTestCores = 5
keyShares = 3
keyThreshold = 3
)
type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int)
func testVariousBackends(t *testing.T, tf testFunc, includeRaft bool) {
logger := logging.NewVaultLogger(hclog.Debug).Named(t.Name())
t.Run("inmem", func(t *testing.T) {
t.Parallel()
logger := logger.Named("inmem")
storage, cleanup := teststorage.MakeReusableStorage(
t, logger, teststorage.MakeInmemBackend(t, logger))
defer cleanup()
tf(t, logger, storage, 20000)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
logger := logger.Named("file")
storage, cleanup := teststorage.MakeReusableStorage(
t, logger, teststorage.MakeFileBackend(t, logger))
defer cleanup()
tf(t, logger, storage, 20020)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
logger := logger.Named("consul")
storage, cleanup := teststorage.MakeReusableStorage(
t, logger, teststorage.MakeConsulBackend(t, logger))
defer cleanup()
tf(t, logger, storage, 20040)
})
if includeRaft {
t.Run("raft", func(t *testing.T) {
t.Parallel()
logger := logger.Named("raft")
storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores)
defer cleanup()
tf(t, logger, storage, 20060)
})
}
}
// TestSealMigration_ShamirToTransit_Pre14 tests shamir-to-transit seal
// migration, using the pre-1.4 method of bringing down the whole cluster to do
// the migration.
func TestSealMigration_ShamirToTransit_Pre14(t *testing.T) {
// Note that we do not test integrated raft storage since this is
// a pre-1.4 test.
testVariousBackends(t, testSealMigrationShamirToTransit_Pre14, false)
}
func testSealMigrationShamirToTransit_Pre14(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) {
// Initialize the backend using shamir
rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort)
// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
defer func() {
tss.EnsureCoresSealed(t)
tss.Cleanup()
}()
tss.MakeKey(t, "transit-seal-key")
// Migrate the backend from shamir to transit. Note that the barrier keys
// are now the recovery keys.
transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
// Run the backend with transit.
runTransit(t, logger, storage, basePort, rootToken, transitSeal)
}
func migrateFromShamirToTransit_Pre14(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int,
tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte,
) vault.Seal {
var baseClusterPort = basePort + 10
var transitSeal vault.Seal
var conf = vault.CoreConfig{
Logger: logger.Named("migrateFromShamirToTransit"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
// N.B. Providing a transit seal puts us in migration mode.
SealFunc: func() vault.Seal {
// Each core will create its own transit seal here. Later
// on it won't matter which one of these we end up using, since
// they were all created from the same transit key.
transitSeal = tss.MakeSeal(t, "transit-seal-key")
return transitSeal
},
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
client.SetToken(rootToken)
// Unseal and migrate to Transit.
unsealMigrate(t, client, recoveryKeys, true)
// Wait for migration to finish. Sadly there is no callback, and the
// test will fail later on if we don't do this.
time.Sleep(10 * time.Second)
// Read the secret
secret, err := client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}
// Make sure the seal configs were updated correctly.
b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
cluster.EnsureCoresSealed(t)
return transitSeal
}
func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) {
for i, key := range keys {
// Try to unseal with missing "migrate" parameter
_, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
Key: base64.StdEncoding.EncodeToString(key),
})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
// Unseal with "migrate" parameter
resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
Key: base64.StdEncoding.EncodeToString(key),
Migrate: true,
})
if i < keyThreshold-1 {
// Not enough keys have been provided yet.
if err != nil {
t.Fatal(err)
}
} else {
if transitServerAvailable {
// The transit server is running.
if err != nil {
t.Fatal(err)
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
} else {
// The transit server is stopped.
if err == nil {
t.Fatal("expected error due to transit server being stopped.")
}
}
break
}
}
}
// verifyBarrierConfig verifies that a barrier configuration is correct.
func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
t.Helper()
if cfg.Type != sealType {
t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
}
if cfg.SecretShares != shares {
t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
}
if cfg.SecretThreshold != threshold {
t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
}
if cfg.StoredShares != stored {
t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
}
}
// initializeShamir initializes a brand new backend storage with Shamir.
func initializeShamir(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) (string, [][]byte) {
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("initializeShamir"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
// Unseal
if storage.IsRaft {
testhelpers.RaftClusterJoinNodes(t, cluster)
if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
t.Fatal(err)
}
} else {
cluster.UnsealCores(t)
}
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
// Write a secret that we will read back out later.
_, err := client.Logical().Write(
"secret/foo",
map[string]interface{}{"zork": "quux"})
if err != nil {
t.Fatal(err)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
return cluster.RootToken, cluster.BarrierKeys
}
// runShamir uses a pre-populated backend storage with Shamir.
func runShamir(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int,
rootToken string, barrierKeys [][]byte) {
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("runShamir"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
client.SetToken(rootToken)
// Unseal
cluster.BarrierKeys = barrierKeys
if storage.IsRaft {
provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
testhelpers.SetRaftAddressProviders(t, cluster, provider)
for _, core := range cluster.Cores {
cluster.UnsealCore(t, core)
}
// This is apparently necessary for the raft cluster to get itself
// situated.
time.Sleep(15 * time.Second)
if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
t.Fatal(err)
}
} else {
cluster.UnsealCores(t)
}
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
// Read the secret
secret, err := client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
}
// initializeTransit initializes a brand new backend storage with Transit.
func initializeTransit(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int,
tss *sealhelper.TransitSealServer) (string, [][]byte, vault.Seal) {
var transitSeal vault.Seal
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("initializeTransit"),
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SealFunc: func() vault.Seal {
// Each core will create its own transit seal here. Later
// on it won't matter which one of these we end up using, since
// they were all created from the same transit key.
transitSeal = tss.MakeSeal(t, "transit-seal-key")
return transitSeal
},
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
// Join raft
if storage.IsRaft {
testhelpers.RaftClusterJoinNodesWithStoredKeys(t, cluster)
if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
t.Fatal(err)
}
}
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
// Write a secret that we will read back out later.
_, err := client.Logical().Write(
"secret/foo",
map[string]interface{}{"zork": "quux"})
if err != nil {
t.Fatal(err)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
return cluster.RootToken, cluster.RecoveryKeys, transitSeal
}
func runTransit(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int,
rootToken string, transitSeal vault.Seal) {
var baseClusterPort = basePort + 10
// Start the cluster
var conf = vault.CoreConfig{
Logger: logger.Named("runTransit"),
Seal: transitSeal,
}
var opts = vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
cluster.Start()
defer func() {
storage.Cleanup(t, cluster)
cluster.Cleanup()
}()
leader := cluster.Cores[0]
client := leader.Client
client.SetToken(rootToken)
// Unseal. Even though we are using autounseal, we have to unseal
// explicitly because we are using SkipInit.
if storage.IsRaft {
provider := testhelpers.NewHardcodedServerAddressProvider(cluster, baseClusterPort)
testhelpers.SetRaftAddressProviders(t, cluster, provider)
for _, core := range cluster.Cores {
cluster.UnsealCoreWithStoredKeys(t, core)
}
// This is apparently necessary for the raft cluster to get itself
// situated.
time.Sleep(15 * time.Second)
if err := testhelpers.VerifyRaftConfiguration(leader, len(cluster.Cores)); err != nil {
t.Fatal(err)
}
} else {
if err := cluster.UnsealCoresWithError(true); err != nil {
t.Fatal(err)
}
}
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
// Read the secret
secret, err := client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}
// Seal the cluster
cluster.EnsureCoresSealed(t)
}
// TestShamir is a temporary test that exercises the reusable raft storage.
// It will be replaced once we do the post-1.4 migration testing.
func TestShamir(t *testing.T) {
testVariousBackends(t, testShamir, true)
}
func testShamir(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) {
rootToken, barrierKeys := initializeShamir(t, logger, storage, basePort)
runShamir(t, logger, storage, basePort, rootToken, barrierKeys)
}
// TestTransit is a temporary test that exercises the reusable raft storage.
// It will be replaced once we do the post-1.4 migration testing.
func TestTransit(t *testing.T) {
testVariousBackends(t, testTransit, true)
}
func testTransit(
t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) {
// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
defer tss.Cleanup()
tss.MakeKey(t, "transit-seal-key")
rootToken, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
runTransit(t, logger, storage, basePort, rootToken, transitSeal)
}


@@ -26,6 +26,8 @@ import (
"sync/atomic"
"time"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/armon/go-metrics"
hclog "github.com/hashicorp/go-hclog"
log "github.com/hashicorp/go-hclog"
@@ -816,6 +818,12 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
}
}
func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
t.Fatal(err)
}
}
func (c *TestCluster) EnsureCoresSealed(t testing.T) {
t.Helper()
if err := c.ensureCoresSealed(); err != nil {
@@ -959,14 +967,28 @@ type TestClusterOptions struct {
HandlerFunc func(*HandlerProperties) http.Handler
DefaultHandlerProperties HandlerProperties
// BaseListenAddress is used to explicitly assign ports in sequence to the
// listener of each core. It should be a string of the form
// "127.0.0.1:20000"
//
// WARNING: Using an explicitly assigned port above 30000 may clash with
// ephemeral ports that have been assigned by the OS in other tests. The
// use of explicitly assigned ports below 30000 is strongly recommended.
// In addition, you should be careful to use explicitly assigned ports that
// do not clash with any other explicitly assigned ports in other tests.
BaseListenAddress string
// BaseClusterListenPort is used to explicitly assign ports in sequence to
// the cluster listener of each core. If BaseClusterListenPort is
// specified, then BaseListenAddress must also be specified. Each cluster
// listener will use the same host as the one specified in
// BaseListenAddress.
//
// WARNING: Using an explicitly assigned port above 30000 may clash with
// ephemeral ports that have been assigned by the OS in other tests. The
// use of explicitly assigned ports below 30000 is strongly recommended.
// In addition, you should be careful to use explicitly assigned ports that
// do not clash with any other explicitly assigned ports in other tests.
BaseClusterListenPort int
NumCores int
@@ -1338,6 +1360,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
coreConfig.CacheSize = base.CacheSize
coreConfig.PluginDirectory = base.PluginDirectory
coreConfig.Seal = base.Seal
coreConfig.UnwrapSeal = base.UnwrapSeal
coreConfig.DevToken = base.DevToken
coreConfig.EnableRaw = base.EnableRaw
coreConfig.DisableSealWrap = base.DisableSealWrap
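
Tying back to the port warnings above, a test that opts into explicitly assigned ports would look roughly like this, written in the external-test style used by the sealmigration package; the test name, address, and port numbers are illustrative only, chosen to stay below 30000 and clear of the bases used by the seal migration tests.

func TestExample_ExplicitPorts(t *testing.T) {
opts := &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
NumCores: 3,
// Keep explicitly assigned ports below 30000 so they cannot collide
// with OS-assigned ephemeral ports used by other tests.
BaseListenAddress: "127.0.0.1:20100",
BaseClusterListenPort: 20110,
}
cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, opts)
cluster.Start()
defer cluster.Cleanup()
}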