Enable transit->shamir seal migration in Enterprise (#8737)

* Enable transit->shamir seal migration in Enterprise
* Prove that we can stop the transit cluster after migration is complete

Parent: add006d712
Commit: 5ae2f103d8
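For context, the migration these changes exercise is driven entirely through the standard unseal API: once a core is configured with both the old (transit) seal and the new (shamir) seal, each unseal call must carry the migrate flag until the key threshold is met. Below is a minimal client-side sketch of that loop, assuming a server already in migration mode; the address and key shares are placeholders, not values from this commit.

// Hypothetical sketch of the unseal-with-migrate loop that the tests in
// this commit automate end to end.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(&api.Config{Address: "https://127.0.0.1:8200"})
	if err != nil {
		log.Fatal(err)
	}

	// Recovery key shares from the original transit-sealed initialization;
	// once the migration completes they become the barrier unseal keys.
	shares := []string{"share-1", "share-2", "share-3"}

	for _, key := range shares {
		// Migrate must be set while a seal migration is in progress; a
		// plain unseal is rejected until the migration has finished.
		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
		if err != nil {
			log.Fatal(err)
		}
		if resp != nil && !resp.Sealed {
			fmt.Println("unsealed; barrier is now protected by the shamir seal")
			break
		}
	}
}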
@@ -1,161 +0,0 @@
-// +build !enterprise
-
-package command
-
-import (
-	"context"
-	"encoding/base64"
-	"testing"
-
-	wrapping "github.com/hashicorp/go-kms-wrapping"
-	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
-	"github.com/hashicorp/vault/api"
-	"github.com/hashicorp/vault/helper/testhelpers"
-	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
-	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
-	vaulthttp "github.com/hashicorp/vault/http"
-	"github.com/hashicorp/vault/vault"
-	"github.com/hashicorp/vault/vault/seal"
-)
-
-func TestSealMigration_TransitToShamir(t *testing.T) {
-	t.Parallel()
-	t.Run("inmem", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
-	})
-
-	t.Run("file", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
-	})
-
-	t.Run("consul", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
-	})
-
-	t.Run("raft", func(t *testing.T) {
-		t.Parallel()
-		testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
-	})
-}
-
-func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
-
-	// Create the transit server.
-	tcluster := sealhelper.NewTransitSealServer(t)
-	defer tcluster.Cleanup()
-	tcluster.MakeKey(t, "key1")
-	var transitSeal vault.Seal
-
-	// Create a cluster that uses transit.
-	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
-		DisableSealWrap: true,
-	}, &vault.TestClusterOptions{
-		HandlerFunc: vaulthttp.Handler,
-		SkipInit:    true,
-		NumCores:    3,
-		SealFunc: func() vault.Seal {
-			transitSeal = tcluster.MakeSeal(t, "key1")
-			return transitSeal
-		},
-	},
-		setup,
-	)
-	opts.SetupFunc = nil
-	cluster := vault.NewTestCluster(t, conf, opts)
-	cluster.Start()
-	defer cluster.Cleanup()
-
-	// Initialize the cluster, and fetch the recovery keys.
-	client := cluster.Cores[0].Client
-	initResp, err := client.Sys().Init(&api.InitRequest{
-		RecoveryShares:    5,
-		RecoveryThreshold: 3,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	for _, k := range initResp.RecoveryKeysB64 {
-		b, _ := base64.RawStdEncoding.DecodeString(k)
-		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	rootToken := initResp.RootToken
-	client.SetToken(rootToken)
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Create a Shamir seal.
-	logger := cluster.Logger.Named("shamir")
-	shamirSeal := vault.NewDefaultSeal(&seal.Access{
-		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
-			Logger: logger,
-		}),
-	})
-
-	// Transition to Shamir seal.
-	if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
-		t.Fatal(err)
-	}
-
-	// Unseal and migrate to Shamir.
-	// Although we're unsealing using the recovery keys, this is still an
-	// autounseal; if we stopped the transit cluster this would fail.
-	var resp *api.SealStatusResponse
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-	testhelpers.WaitForActiveNode(t, cluster)
-
-	// Seal the cluster.
-	if err := client.Sys().Seal(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Nuke the transit server; assign nil to Cores so the deferred Cleanup
-	// doesn't break.
-	tcluster.Cleanup()
-	tcluster.Cores = nil
-
-	// Unseal the cluster. Now the recovery keys are actually the barrier
-	// unseal keys.
-	for _, key := range initResp.RecoveryKeysB64 {
-		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-		if err != nil {
-			t.Fatal(err)
-		}
-		if resp == nil || !resp.Sealed {
-			break
-		}
-	}
-	if resp == nil || resp.Sealed {
-		t.Fatalf("expected unsealed state; got %#v", resp)
-	}
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
-	if r != nil {
-		t.Fatalf("expected nil recovery config, got: %#v", r)
-	}
-}
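The file deleted above (its tests are re-homed into the shared test file later in this diff) leans on a verifyBarrierConfig helper whose body is not part of this commit. A sketch of what such an assertion presumably checks, using vault.SealConfig's exported fields; the helper name and exact checks here are assumptions, not the repository's actual implementation:

// Hypothetical sketch of a verifyBarrierConfig-style assertion; the real
// helper lives elsewhere in package command.
func verifyBarrierConfigSketch(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
	t.Helper()
	if cfg == nil {
		t.Fatal("expected non-nil seal config")
	}
	if cfg.Type != sealType {
		t.Fatalf("seal type: expected %q, got %q", sealType, cfg.Type)
	}
	if cfg.SecretShares != shares {
		t.Fatalf("secret shares: expected %d, got %d", shares, cfg.SecretShares)
	}
	if cfg.SecretThreshold != threshold {
		t.Fatalf("secret threshold: expected %d, got %d", threshold, cfg.SecretThreshold)
	}
	if cfg.StoredShares != stored {
		t.Fatalf("stored shares: expected %d, got %d", stored, cfg.StoredShares)
	}
}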
@@ -6,6 +6,7 @@ import (
 	"testing"
 
 	wrapping "github.com/hashicorp/go-kms-wrapping"
+	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
 	"github.com/hashicorp/vault/api"
 	"github.com/hashicorp/vault/helper/testhelpers"
 	sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
@@ -289,29 +290,6 @@ func testSealMigrationShamirToTestSeal(t *testing.T, setup teststorage.ClusterSe
 		t.Fatal(err)
 	}
 
-	////// Seal the transit cluster; we expect the unseal of our main cluster
-	////// to fail as a result.
-	////tcluster.EnsureCoresSealed(t)
-
-	////// Verify that we cannot unseal. Now the barrier unseal keys are actually
-	////// the recovery keys.
-	////for _, key := range initResp.KeysB64 {
-	////	resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
-	////	if err != nil {
-	////		break
-	////	}
-	////	if resp == nil || !resp.Sealed {
-	////		break
-	////	}
-	////}
-	////if err == nil || resp != nil {
-	////	t.Fatalf("expected sealed state; got %#v", resp)
-	////}
-
-	////// Unseal the transit server; we expect the unseal to work now on our main
-	////// cluster.
-	////tcluster.UnsealCores(t)
-
 	// Verify that we can unseal.
 	for _, key := range initResp.KeysB64 {
 		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
@@ -342,27 +320,31 @@ func TestSealMigration_TransitToTestSeal(t *testing.T) {
 		testSealMigrationTransitToTestSeal(t, teststorage.InmemBackendSetup)
 	})
 
-	//t.Run("file", func(t *testing.T) {
-	//	t.Parallel()
-	//	testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
-	//})
+	t.Run("file", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
+	})
 
-	//t.Run("consul", func(t *testing.T) {
-	//	t.Parallel()
-	//	testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
-	//})
+	t.Run("consul", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
+	})
 
-	//t.Run("raft", func(t *testing.T) {
-	//	t.Parallel()
-	//	testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
-	//})
+	t.Run("raft", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
+	})
 }
 
 func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
 
 	// Create the transit server.
 	tcluster := sealhelper.NewTransitSealServer(t)
-	defer tcluster.Cleanup()
+	defer func() {
+		if tcluster != nil {
+			tcluster.Cleanup()
+		}
+	}()
 	tcluster.MakeKey(t, "key1")
 	var transitSeal vault.Seal
 
@@ -441,10 +423,166 @@ func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterS
 		t.Fatal(err)
 	}
 
-	// Nuke the transit server; assign nil to Cores so the deferred Cleanup
-	// doesn't break.
-	tcluster.Cleanup()
-	tcluster.Cores = nil
+	// Unseal the cluster. Now the recovery keys are actually the barrier
+	// unseal keys.
+	for _, key := range initResp.RecoveryKeysB64 {
+		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil || !resp.Sealed {
+			break
+		}
+	}
+	if resp == nil || resp.Sealed {
+		t.Fatalf("expected unsealed state; got %#v", resp)
+	}
+	testhelpers.WaitForActiveNode(t, cluster)
+
+	// Make sure the seal configs were updated correctly.
+	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
+	verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
+
+	// Now that migration is done, we can stop the transit cluster, since we
+	// can seal/unseal without it.
+	tcluster.Cleanup()
+	tcluster.Cores = nil
+	tcluster = nil
+
+	if err := client.Sys().Seal(); err != nil {
+		t.Fatal(err)
+	}
+	for _, key := range initResp.RecoveryKeysB64 {
+		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil || !resp.Sealed {
+			break
+		}
+	}
+	if resp == nil || resp.Sealed {
+		t.Fatalf("expected unsealed state; got %#v", resp)
+	}
+}
+
+func TestSealMigration_TransitToShamir(t *testing.T) {
+	t.Parallel()
+	t.Run("inmem", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
+	})
+
+	t.Run("file", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
+	})
+
+	t.Run("consul", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
+	})
+
+	t.Run("raft", func(t *testing.T) {
+		t.Parallel()
+		testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
+	})
+}
+
+func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
+
+	// Create the transit server.
+	tcluster := sealhelper.NewTransitSealServer(t)
+	defer func() {
+		if tcluster != nil {
+			tcluster.Cleanup()
+		}
+	}()
+	tcluster.MakeKey(t, "key1")
+	var transitSeal vault.Seal
+
+	// Create a cluster that uses transit.
+	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
+		DisableSealWrap: true,
+	}, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+		SkipInit:    true,
+		NumCores:    3,
+		SealFunc: func() vault.Seal {
+			transitSeal = tcluster.MakeSeal(t, "key1")
+			return transitSeal
+		},
+	},
+		setup,
+	)
+	opts.SetupFunc = nil
+	cluster := vault.NewTestCluster(t, conf, opts)
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	// Initialize the cluster, and fetch the recovery keys.
+	client := cluster.Cores[0].Client
+	initResp, err := client.Sys().Init(&api.InitRequest{
+		RecoveryShares:    5,
+		RecoveryThreshold: 3,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, k := range initResp.RecoveryKeysB64 {
+		b, _ := base64.RawStdEncoding.DecodeString(k)
+		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
+	}
+	testhelpers.WaitForActiveNode(t, cluster)
+
+	rootToken := initResp.RootToken
+	client.SetToken(rootToken)
+	if err := client.Sys().Seal(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a Shamir seal.
+	logger := cluster.Logger.Named("shamir")
+	shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{
+		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
+			Logger: logger,
+		}),
+	})
+
+	// Transition to Shamir seal.
+	if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
+		t.Fatal(err)
+	}
+
+	// Unseal and migrate to Shamir.
+	// Although we're unsealing using the recovery keys, this is still an
+	// autounseal; if we stopped the transit cluster this would fail.
+	var resp *api.SealStatusResponse
+	for _, key := range initResp.RecoveryKeysB64 {
+		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
+		if err == nil {
+			t.Fatal("expected error due to lack of migrate parameter")
+		}
+		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil || !resp.Sealed {
+			break
+		}
+	}
+	if resp == nil || resp.Sealed {
+		t.Fatalf("expected unsealed state; got %#v", resp)
+	}
+	testhelpers.WaitForActiveNode(t, cluster)
+
+	// Seal the cluster.
+	if err := client.Sys().Seal(); err != nil {
+		t.Fatal(err)
+	}
+
 	// Unseal the cluster. Now the recovery keys are actually the barrier
 	// unseal keys.
@@ -460,14 +598,38 @@ func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterS
 	if resp == nil || resp.Sealed {
 		t.Fatalf("expected unsealed state; got %#v", resp)
 	}
+	testhelpers.WaitForActiveNode(t, cluster)
 
 	// Make sure the seal configs were updated correctly.
 	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
 	if err != nil {
 		t.Fatal(err)
 	}
-	verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
+	verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
+	if r != nil {
+		t.Fatalf("expected nil recovery config, got: %#v", r)
+	}
+
+	// Now that migration is done, we can stop the transit cluster, since we
+	// can seal/unseal without it.
+	tcluster.Cleanup()
+	tcluster = nil
+
+	if err := client.Sys().Seal(); err != nil {
+		t.Fatal(err)
+	}
+	for _, key := range initResp.RecoveryKeysB64 {
+		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil || !resp.Sealed {
+			break
+		}
+	}
+	if resp == nil || resp.Sealed {
+		t.Fatalf("expected unsealed state; got %#v", resp)
+	}
 }
 
 /*
@@ -16,7 +16,6 @@ import (
 )
 
 var (
-	onEnterprise                 = false
 	createSecureRandomReaderFunc = createSecureRandomReader
 	adjustCoreConfigForEnt       = adjustCoreConfigForEntNoop
 )
@@ -62,10 +61,6 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
 		return errors.New(`Recovery seal configuration not found for existing seal`)
 	}
 
-	if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir {
-		return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise")
-	}
-
 	var migrationSeal vault.Seal
 	var newSeal vault.Seal
 
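With the Enterprise-only guard removed, adjustCoreForSealMigration falls through to pairing the two seals in either direction. The diff only shows the declarations of migrationSeal and newSeal, so the following is a rough, self-contained sketch of that decision under the assumption that the existing seal is kept as a one-shot migration seal while the configured seal becomes the new barrier seal; the function and variable names are illustrative, not the function's actual body:

// Hypothetical sketch of the migration-seal pairing logic.
package main

import (
	"fmt"

	wrapping "github.com/hashicorp/go-kms-wrapping"
)

// pickSeals reports which seal type should drive the migration (decrypting
// the existing barrier keys one last time) and which becomes the new
// barrier seal going forward.
func pickSeals(existingType, configuredType string) (migration, newSeal string, err error) {
	switch {
	case existingType == wrapping.Shamir && configuredType != wrapping.Shamir:
		// Shamir -> autoseal.
		return existingType, configuredType, nil
	case existingType != wrapping.Shamir && configuredType == wrapping.Shamir:
		// Autoseal (e.g. transit) -> shamir; the direction this commit
		// enables on Enterprise.
		return existingType, configuredType, nil
	default:
		return "", "", fmt.Errorf("unsupported migration from %q to %q", existingType, configuredType)
	}
}

func main() {
	m, n, err := pickSeals(wrapping.Transit, wrapping.Shamir)
	if err != nil {
		panic(err)
	}
	fmt.Printf("migration seal: %s, new barrier seal: %s\n", m, n)
}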