Seal migration with Raft (#8103)

* Seal migration after unsealing

* Refactor migration fields into migrationInformation in core

* Perform seal migration as part of postUnseal

* Remove the sleep logic

* Use proper seal in the unseal function

* Fix migration from Auto to Shamir

* Fix the recovery config missing issue

* Address the non-ha migration case

* Fix the multi cluster case

* Avoid re-running seal migration

* Run the post migration code in new leaders

* Fix the issue of wrong recovery being set

* Address review feedback

* Add more complete testing coverage for seal migrations (#8247)

* Add more complete testing coverage for seal migrations. Also remove the VAULT_ACC gate from some tests that only depend on Docker, clean up the dangling recovery config left in storage after migration, and fix a call in adjustCoreForSealMigration that seems broken.

* Fix the issue of wrong recovery key being set

* Adapt tests to work with multiple cores.

* Add missing line to disable raft join.

Co-authored-by: Vishal Nayak <vishalnayak@users.noreply.github.com>

* Fix all known issues

* Remove warning

* Review feedback.

* Revert my previous change that broke raft tests. We'll need to come back and at least comment this once we better understand why it's needed.

* Don't allow migration between same types for now

* Disable auto-to-auto tests for now, since they migrate between the same seal types, which is not allowed

* Update vault/core.go

Co-Authored-By: Brian Kassouf <briankassouf@users.noreply.github.com>

* Add migration logs

* Address review comments

* Add the recovery config check back

* Skip a few steps if migration is already done

* Return from waitForLeadership if migration fails

Co-authored-by: ncabatoff <nick.cabatoff@gmail.com>
Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com>
Vishal Nayak 2020-02-13 16:27:31 -05:00 committed by GitHub
parent dac3382e15
commit c87d34d1a4
8 changed files with 674 additions and 624 deletions
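For orientation before the file diffs: a pending seal migration is now driven entirely through the ordinary unseal API with the migrate flag set, and the migration itself runs server-side once the barrier is unsealed (see migrateSeal, called from unsealInternal and, for newly elected leaders, from waitForLeadership). A minimal sketch of the client-side loop the tests below exercise; migrateUnseal is an illustrative name, not part of this change, and it assumes a root-token *api.Client against a cluster whose server seal configuration has already been swapped:

package main

import (
	"errors"

	"github.com/hashicorp/vault/api"
)

// migrateUnseal seals the node, then feeds each unseal (or recovery) key
// back with Migrate: true, mirroring the loops in the tests below.
func migrateUnseal(client *api.Client, keys []string) error {
	if err := client.Sys().Seal(); err != nil {
		return err
	}
	for _, key := range keys {
		// While a migration is pending, a plain unseal should be rejected.
		if _, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key}); err == nil {
			return errors.New("expected error due to missing migrate parameter")
		}
		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
		if err != nil {
			return err
		}
		if resp != nil && !resp.Sealed {
			return nil // unsealed; the migration runs on the active node
		}
	}
	return errors.New("ran out of keys before the node unsealed")
}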


@@ -0,0 +1,152 @@
// +build !enterprise
package command
import (
"context"
"encoding/base64"
"testing"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func TestSealMigration_AutoToShamir(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToShamir(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToShamir(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToShamir(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToShamir(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationAutoToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
tcluster := newTransitSealServer(t)
defer tcluster.Cleanup()
tcluster.makeKey(t, "key1")
var seals []vault.Seal
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
tseal := tcluster.makeSeal(t, "key1")
seals = append(seals, tseal)
return tseal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
logger := cluster.Logger.Named("shamir")
shamirSeal := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger,
}),
})
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, seals[0]); err != nil {
t.Fatal(err)
}
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal and unseal again to verify that things are working fine
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
tcluster.Cleanup()
// Assign nil to Cores so the deferred Cleanup doesn't break.
tcluster.Cores = nil
// Now the recovery keys are actually the barrier unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
}


@@ -1,49 +1,289 @@
// +build !enterprise
package command
import (
"context"
"encoding/base64"
"path"
"testing"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/shamir"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/transit"
commandseal "github.com/hashicorp/vault/command/server/seal"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func TestSealMigrationAutoToShamir(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
phys, err := physInmem.NewInmem(nil, logger)
if err != nil {
type transitSealServer struct {
*vault.TestCluster
}
func newTransitSealServer(t *testing.T) *transitSealServer {
conf := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,
},
}
opts := &vault.TestClusterOptions{
NumCores: 1,
HandlerFunc: vaulthttp.Handler,
Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit"),
}
teststorage.InmemBackendSetup(conf, opts)
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
if err := cluster.Cores[0].Client.Sys().Mount("transit", &api.MountInput{
Type: "transit",
}); err != nil {
t.Fatal(err)
}
haPhys, err := physInmem.NewInmemHA(nil, logger)
if err != nil {
return &transitSealServer{cluster}
}
func (tss *transitSealServer) makeKey(t *testing.T, key string) {
client := tss.Cores[0].Client
// Create default aesgcm key
if _, err := client.Logical().Write(path.Join("transit", "keys", key), nil); err != nil {
t.Fatal(err)
}
autoSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
cluster := vault.NewTestCluster(t, &vault.CoreConfig{
Seal: autoSeal,
Physical: phys,
HAPhysical: haPhys.(physical.HABackend),
if _, err := client.Logical().Write(path.Join("transit", "keys", key, "config"), map[string]interface{}{
"deletion_allowed": true,
}); err != nil {
t.Fatal(err)
}
}
func (tss *transitSealServer) makeSeal(t *testing.T, key string) vault.Seal {
client := tss.Cores[0].Client
wrapperConfig := map[string]string{
"address": client.Address(),
"token": client.Token(),
"mount_path": "transit",
"key_name": key,
"tls_ca_cert": tss.CACertPEMFile,
}
transitSeal, _, err := commandseal.GetTransitKMSFunc(nil, wrapperConfig)
if err != nil {
t.Fatalf("error setting wrapper config: %v", err)
}
return vault.NewAutoSeal(&seal.Access{
Wrapper: transitSeal,
})
}
func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
t.Helper()
if cfg.Type != sealType {
t.Fatalf("bad seal config: %#v, expected type=%q", cfg, sealType)
}
if cfg.SecretShares != shares {
t.Fatalf("bad seal config: %#v, expected SecretShares=%d", cfg, shares)
}
if cfg.SecretThreshold != threshold {
t.Fatalf("bad seal config: %#v, expected SecretThreshold=%d", cfg, threshold)
}
if cfg.StoredShares != stored {
t.Fatalf("bad seal config: %#v, expected StoredShares=%d", cfg, stored)
}
}
func TestSealMigration_ShamirToAuto(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToAuto(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToAuto(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToAuto(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationShamirToAuto(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationShamirToAuto(t *testing.T, setup teststorage.ClusterSetupMutator) {
tcluster := newTransitSealServer(t)
defer tcluster.Cleanup()
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
Logger: logger,
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 1,
NumCores: 3,
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
tcluster.makeKey(t, "key1")
autoSeal := tcluster.makeSeal(t, "key1")
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
SecretShares: 5,
SecretThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
var resp *api.SealStatusResponse
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
if err := adjustCoreForSealMigration(cluster.Logger, cluster.Cores[0].Core, autoSeal, nil); err != nil {
t.Fatal(err)
}
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal and unseal again to verify that things are working fine
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Now the barrier unseal keys are actually the recovery keys.
// Seal the transit cluster; we expect the unseal of our other cluster
// to fail as a result.
tcluster.EnsureCoresSealed(t)
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
break
}
if resp == nil || !resp.Sealed {
break
}
}
if err == nil || resp != nil {
t.Fatalf("expected sealed state; got %#v", resp)
}
tcluster.UnsealCores(t)
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
// Make sure the seal configs were updated correctly
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
}
/*
func TestSealMigration_AutoToAuto(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToAuto(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToAuto(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToAuto(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationAutoToAuto(t, teststorage.RaftBackendSetup)
})
}
*/
func testSealMigrationAutoToAuto(t *testing.T, setup teststorage.ClusterSetupMutator) {
tcluster := newTransitSealServer(t)
defer tcluster.Cleanup()
tcluster.makeKey(t, "key1")
tcluster.makeKey(t, "key2")
var seals []vault.Seal
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
tseal := tcluster.makeSeal(t, "key1")
seals = append(seals, tseal)
return tseal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
@@ -55,486 +295,60 @@ func TestSealMigrationAutoToShamir(t *testing.T) {
if err != nil {
t.Fatal(err)
}
rootToken := initResp.RootToken
client.SetToken(rootToken)
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
keys := initResp.RecoveryKeysB64
rootToken := initResp.RootToken
core := cluster.Cores[0].Core
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
shamirSeal := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
shamirSeal.SetCore(core)
if err := adjustCoreForSealMigration(logger, core, shamirSeal, autoSeal); err != nil {
logger := cluster.Logger.Named("shamir")
autoSeal2 := tcluster.makeSeal(t, "key2")
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, autoSeal2, seals[0]); err != nil {
t.Fatal(err)
}
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
unsealOpts := &api.UnsealOpts{}
for _, key := range keys {
unsealOpts.Key = key
unsealOpts.Migrate = false
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
unsealOpts.Migrate = true
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if !resp.Sealed {
if resp == nil || !resp.Sealed {
break
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal and unseal again to verify that things are working fine
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
unsealOpts.Migrate = false
for _, key := range keys {
unsealOpts.Key = key
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if !resp.Sealed {
break
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
}
func TestSealMigration(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
phys, err := physInmem.NewInmem(nil, logger)
// Delete the original seal's transit key.
_, err = tcluster.Cores[0].Client.Logical().Delete(path.Join("transit", "keys", "key1"))
if err != nil {
t.Fatal(err)
}
haPhys, err := physInmem.NewInmemHA(nil, logger)
err = cluster.Cores[0].Core.UnsealWithStoredKeys(context.Background())
if err != nil {
t.Fatal(err)
}
wrapper := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
coreConfig := &vault.CoreConfig{
Seal: wrapper,
Physical: phys,
HAPhysical: haPhys.(physical.HABackend),
DisableSealWrap: true,
}
clusterConfig := &vault.TestClusterOptions{
Logger: logger,
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 1,
}
ctx := context.Background()
var keys []string
var rootToken string
{
logger.Info("integ: start up as normal with shamir seal, init it")
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
coreConfig = cluster.Cores[0].CoreConfig
// Init
resp, err := client.Sys().Init(&api.InitRequest{
SecretShares: 2,
SecretThreshold: 2,
})
if err != nil {
t.Fatal(err)
}
keys = resp.KeysB64
rootToken = resp.RootToken
// Now seal
cluster.Cleanup()
// This will prevent cleanup from running again on the defer
cluster.Cores = nil
}
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: start up as normal with shamir seal and unseal, make sure everything is normal")
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
client.SetToken(rootToken)
var resp *api.SealStatusResponse
for _, key := range keys {
resp, err = client.Sys().Unseal(key)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
}
if resp.Sealed {
t.Fatal("expected unsealed state")
}
cluster.Cleanup()
cluster.Cores = nil
}
var autoSeal vault.Seal
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: creating an autoseal and activating migration")
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0].Core
newSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
newSeal.SetCore(core)
autoSeal = newSeal
if err := adjustCoreForSealMigration(logger, core, newSeal, nil); err != nil {
t.Fatal(err)
}
client := cluster.Cores[0].Client
client.SetToken(rootToken)
var resp *api.SealStatusResponse
unsealOpts := &api.UnsealOpts{}
for _, key := range keys {
unsealOpts.Key = key
unsealOpts.Migrate = false
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
unsealOpts.Migrate = true
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := autoSeal.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != autoSeal.BarrierType() {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
r, err := autoSeal.RecoveryConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if r.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.StoredShares != 0 {
t.Fatalf("bad seal config: %#v", r)
}
cluster.Cleanup()
cluster.Cores = nil
}
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: verify autoseal and recovery key usage")
coreConfig.Seal = autoSeal
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0].Core
client := cluster.Cores[0].Client
client.SetToken(rootToken)
if err := core.UnsealWithStoredKeys(ctx); err != nil {
t.Fatal(err)
}
resp, err := client.Sys().SealStatus()
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
keyParts := [][]byte{}
for _, key := range keys {
raw, err := base64.StdEncoding.DecodeString(key)
if err != nil {
t.Fatal(err)
}
keyParts = append(keyParts, raw)
}
recoveredKey, err := shamir.Combine(keyParts)
if err != nil {
t.Fatal(err)
}
sealAccess := core.SealAccess()
if err := sealAccess.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
t.Fatal(err)
}
cluster.Cleanup()
cluster.Cores = nil
}
// We should see stored barrier keys; after the sixth test, we shouldn't
if entry, err := phys.Get(ctx, vault.StoredBarrierKeysPath); err != nil || entry == nil {
t.Fatalf("expected nil error and non-nil entry, got error %#v and entry %#v", err, entry)
}
altTestSeal := seal.NewTestSeal(nil)
altTestSeal.SetType("test-alternate")
altSeal := vault.NewAutoSeal(altTestSeal)
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: migrate from auto-seal to auto-seal")
coreConfig.Seal = autoSeal
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0].Core
if err := adjustCoreForSealMigration(logger, core, altSeal, autoSeal); err != nil {
t.Fatal(err)
}
client := cluster.Cores[0].Client
client.SetToken(rootToken)
var resp *api.SealStatusResponse
unsealOpts := &api.UnsealOpts{}
for _, key := range keys {
unsealOpts.Key = key
unsealOpts.Migrate = true
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := altSeal.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != altSeal.BarrierType() {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
r, err := altSeal.RecoveryConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if r.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.StoredShares != 0 {
t.Fatalf("bad seal config: %#v", r)
}
cluster.Cleanup()
cluster.Cores = nil
}
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: create a Shamir seal and activate migration; verify it doesn't work if disabled isn't set.")
coreConfig.Seal = altSeal
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0].Core
wrapper := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
if err := adjustCoreForSealMigration(logger, core, wrapper, altSeal); err != nil {
t.Fatal(err)
}
client := cluster.Cores[0].Client
client.SetToken(rootToken)
var resp *api.SealStatusResponse
unsealOpts := &api.UnsealOpts{}
for _, key := range keys {
unsealOpts.Key = key
unsealOpts.Migrate = false
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
unsealOpts.Migrate = true
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := wrapper.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
_, err = wrapper.RecoveryConfig(context.Background())
if err == nil {
t.Fatal("expected error")
}
cluster.Cleanup()
cluster.Cores = nil
}
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: verify autoseal is off and the expected key shares work")
coreConfig.Seal = wrapper
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
core := cluster.Cores[0].Core
client := cluster.Cores[0].Client
client.SetToken(rootToken)
if err := core.UnsealWithStoredKeys(ctx); err != nil {
t.Fatal(err)
}
resp, err := client.Sys().SealStatus()
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if !resp.Sealed {
t.Fatalf("expected sealed state; got %#v", *resp)
}
for _, key := range keys {
resp, err = client.Sys().Unseal(key)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
}
if resp.Sealed {
t.Fatal("expected unsealed state")
}
cluster.Cleanup()
cluster.Cores = nil
}
}


@@ -3,7 +3,6 @@ package seal_test
import (
"context"
"fmt"
"os"
"path"
"reflect"
"strings"
@@ -17,9 +16,6 @@
)
func TestTransitWrapper_Lifecycle(t *testing.T) {
if os.Getenv("VAULT_ACC") == "" {
t.Skip()
}
cleanup, retAddress, token, mountPath, keyName, _ := prepareTestContainer(t)
defer cleanup()
@@ -53,9 +49,6 @@ func TestTransitWrapper_Lifecycle(t *testing.T) {
}
func TestTransitSeal_TokenRenewal(t *testing.T) {
if os.Getenv("VAULT_ACC") == "" {
t.Skip()
}
cleanup, retAddress, token, mountPath, keyName, tlsConfig := prepareTestContainer(t)
defer cleanup()


@@ -54,14 +54,6 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
}
}
if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
// In this case our migration seal is set so we are using it
// (potentially) for unwrapping. Set it on core for that purpose then
// exit.
core.SetSealsForMigration(nil, nil, unwrapSeal)
return nil
}
if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
return errors.New(`Recovery seal configuration not found for existing seal`)
}
@@ -93,6 +85,18 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
// newSeal will be the barrierSeal
newSeal = barrierSeal
if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
return errors.New("Migrating between same seal types is currently not supported")
}
if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
// In this case our migration seal is set so we are using it
// (potentially) for unwrapping. Set it on core for that purpose then
// exit.
core.SetSealsForMigration(nil, nil, unwrapSeal)
return nil
}
// Set the appropriate barrier and recovery configs.
switch {
case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():


@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/helper/testhelpers/consul"
vaulthttp "github.com/hashicorp/vault/http"
physConsul "github.com/hashicorp/vault/physical/consul"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/physical"
@@ -158,7 +159,27 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
opts.KeepStandbysSealed = true
opts.PhysicalFactory = MakeRaftBackend
opts.SetupFunc = func(t testing.T, c *vault.TestCluster) {
testhelpers.RaftClusterJoinNodes(t, c)
time.Sleep(15 * time.Second)
if opts.NumCores != 1 {
testhelpers.RaftClusterJoinNodes(t, c)
time.Sleep(15 * time.Second)
}
}
}
func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup ClusterSetupMutator) (*vault.CoreConfig, *vault.TestClusterOptions) {
var localConf vault.CoreConfig
if conf != nil {
localConf = *conf
}
localOpts := vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
}
if opts != nil {
localOpts = *opts
}
if setup == nil {
setup = InmemBackendSetup
}
setup(&localConf, &localOpts)
return &localConf, &localOpts
}
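For context, this is how the migration tests above consume the new ClusterSetup helper from inside a test body; the values shown are the ones those tests pass, and a nil mutator falls back to InmemBackendSetup:

conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
	DisableSealWrap: true,
}, &vault.TestClusterOptions{
	HandlerFunc: vaulthttp.Handler,
	SkipInit:    true,
	NumCores:    3,
},
	teststorage.RaftBackendSetup,
)
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()

Note that a non-nil opts replaces the defaults wholesale rather than merging, which is why callers still pass HandlerFunc explicitly.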


@@ -23,8 +23,8 @@ import (
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
multierror "github.com/hashicorp/go-multierror"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/command/server"
@@ -45,7 +45,7 @@ import (
"github.com/hashicorp/vault/shamir"
"github.com/hashicorp/vault/vault/cluster"
vaultseal "github.com/hashicorp/vault/vault/seal"
cache "github.com/patrickmn/go-cache"
"github.com/patrickmn/go-cache"
"google.golang.org/grpc"
)
@@ -165,6 +165,19 @@ type raftInformation struct {
joinInProgress bool
}
type migrationInformation struct {
// seal is the seal to use during a migration operation. It is the
// seal we're migrating *from*.
seal Seal
// masterKey is the master key recovered during unseal; when migrating
// from an auto-seal to shamir it is re-stored under the new seal.
masterKey []byte
// recoveryKey is the recovery key recovered from the old auto-seal;
// when migrating to shamir it becomes the new shamir KeK.
recoveryKey []byte
// shamirCombinedKey is the combined key used to encrypt the master key
// when a shamir seal is in use. This will be set as the recovery key
// when a migration happens from shamir to auto-seal.
shamirCombinedKey []byte
}
// Core is used as the central manager of Vault activity. It is the primary point of
// interface for API handlers and is responsible for managing the logical and physical
// backends, router, security barrier, and audit trails.
@@ -218,9 +231,9 @@ type Core struct {
// peer to an existing raft cluster
raftInfo *raftInformation
// migrationSeal is the seal to use during a migration operation. It is the
// seal we're migrating *from*.
migrationSeal Seal
// migrationInfo is used during a seal migration. This contains information
// about the seal we are migrating *from*.
migrationInfo *migrationInformation
sealMigrated *uint32
// unwrapSeal is the seal to use on Enterprise to unwrap values wrapped
@@ -1006,9 +1019,9 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
}
sealToUse := c.seal
if c.migrationSeal != nil {
if c.migrationInfo != nil {
c.logger.Info("unsealing using migration seal")
sealToUse = c.migrationSeal
sealToUse = c.migrationInfo.seal
}
// unsealPart returns either a master key (legacy shamir) or an unseal
@@ -1019,30 +1032,27 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
}
if masterKey != nil {
if c.seal.BarrierType() == wrapping.Shamir {
if sealToUse.BarrierType() == wrapping.Shamir && c.migrationInfo == nil {
// If this is a legacy shamir seal this serves no purpose but it
// doesn't hurt.
err = c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey)
err = sealToUse.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey)
if err != nil {
return false, err
}
}
if !c.isRaftUnseal() {
if c.seal.BarrierType() == wrapping.Shamir {
cfg, err := c.seal.BarrierConfig(ctx)
if sealToUse.BarrierType() == wrapping.Shamir {
cfg, err := sealToUse.BarrierConfig(ctx)
if err != nil {
return false, err
}
// If there is a stored key, retrieve it.
if cfg.StoredShares > 0 {
if err != nil {
return false, err
}
// Here's where we actually test that the provided unseal
// key is valid: can it decrypt the stored master key?
storedKeys, err := c.seal.GetStoredKeys(ctx)
storedKeys, err := sealToUse.GetStoredKeys(ctx)
if err != nil {
return false, err
}
@@ -1070,7 +1080,7 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
default:
// This is the case for manual raft join. Send the answer to the leader node and
// wait for data to start streaming in.
if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), c.raftInfo); err != nil {
if err := c.joinRaftSendAnswer(ctx, sealToUse.GetAccess(), c.raftInfo); err != nil {
return false, err
}
// Reset the state
@@ -1079,7 +1089,7 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
go func() {
keyringFound := false
haveMasterKey := c.seal.StoredKeysSupported() != vaultseal.StoredKeysSupportedShamirMaster
haveMasterKey := sealToUse.StoredKeysSupported() != vaultseal.StoredKeysSupportedShamirMaster
defer func() {
if keyringFound && haveMasterKey {
_, err := c.unsealInternal(ctx, masterKey)
@@ -1103,7 +1113,7 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
}
}
if !haveMasterKey {
keys, err := c.seal.GetStoredKeys(ctx)
keys, err := sealToUse.GetStoredKeys(ctx)
if err != nil {
c.logger.Error("failed to read master key", "error", err)
return
@@ -1156,7 +1166,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover
var err error
switch {
case seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil):
case seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationInfo != nil):
config, err = seal.RecoveryConfig(ctx)
case c.isRaftUnseal():
// Ignore follower's seal config and refer to leader's barrier
@@ -1201,7 +1211,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover
}
}
if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) {
if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationInfo != nil) {
// Verify recovery key.
if err := seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
return nil, err
@@ -1234,12 +1244,15 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover
} else {
masterKey = recoveredKey
}
newRecoveryKey := masterKey
// If we have a migration seal, now's the time!
if c.migrationSeal != nil {
switch {
case c.migrationInfo != nil:
// Make copies of the fields that get passed on to the migration via
// migrationInfo, to avoid accidental reference changes.
c.migrationInfo.shamirCombinedKey = make([]byte, len(recoveredKey))
copy(c.migrationInfo.shamirCombinedKey, recoveredKey)
if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster {
err = seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey)
err = seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveredKey)
if err != nil {
return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
}
@@ -1249,112 +1262,138 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover
}
masterKey = storedKeys[0]
}
// Unseal the barrier so we can rekey
if err := c.barrier.Unseal(ctx, masterKey); err != nil {
return nil, errwrap.Wrapf("error unsealing barrier with constructed master key: {{err}}", err)
}
defer c.barrier.Seal()
switch {
case c.migrationSeal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
// Set the recovery and barrier keys to be the same.
recoveryKey, err := c.migrationSeal.RecoveryKey(ctx)
if err != nil {
return nil, errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err)
}
if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil {
return nil, errwrap.Wrapf("error setting new recovery key information during migrate: {{err}}", err)
}
barrierKeys, err := c.migrationSeal.GetStoredKeys(ctx)
if err != nil {
return nil, errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err)
}
if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
return nil, errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
}
case c.migrationSeal.RecoveryKeySupported():
// Auto to Shamir, since recovery key isn't supported on new seal
// In this case we have to ensure that the recovery information was
// set properly.
if recoveryKey == nil {
return nil, errors.New("did not get expected recovery information to set new seal during migration")
}
// We have recovery keys; we're going to use them as the new
// shamir KeK.
err = c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveryKey)
if err != nil {
return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
}
if err := c.seal.SetStoredKeys(ctx, [][]byte{masterKey}); err != nil {
return nil, errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
}
masterKey = recoveryKey
case c.seal.RecoveryKeySupported():
// The new seal will have recovery keys; we set it to the existing
// master key, so barrier key shares -> recovery key shares
if err := c.seal.SetRecoveryKey(ctx, newRecoveryKey); err != nil {
return nil, errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
}
// Generate a new master key
newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader)
if err != nil {
return nil, errwrap.Wrapf("error generating new master key: {{err}}", err)
}
// Rekey the barrier
if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
}
// Store the new master key
if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
return nil, errwrap.Wrapf("error storing new master key: {[err}}", err)
}
// Return the new key so it can be used to unlock the barrier
masterKey = newMasterKey
default:
return nil, errors.New("unhandled migration case (shamir to shamir)")
}
// At this point we've swapped things around and need to ensure we
// don't migrate again
c.migrationSeal = nil
atomic.StoreUint32(c.sealMigrated, 1)
// Ensure we populate the new values
bc, err := c.seal.BarrierConfig(ctx)
if err != nil {
return nil, errwrap.Wrapf("error fetching barrier config after migration: {{err}}", err)
}
if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
return nil, errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
}
if c.seal.RecoveryKeySupported() {
rc, err := c.seal.RecoveryConfig(ctx)
if err != nil {
return nil, errwrap.Wrapf("error fetching recovery config after migration: {{err}}", err)
}
if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
return nil, errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
}
}
c.migrationInfo.masterKey = make([]byte, len(masterKey))
copy(c.migrationInfo.masterKey, masterKey)
c.migrationInfo.recoveryKey = make([]byte, len(recoveryKey))
copy(c.migrationInfo.recoveryKey, recoveryKey)
}
return masterKey, nil
}
func (c *Core) migrateSeal(ctx context.Context) error {
if c.migrationInfo == nil {
return nil
}
existBarrierSealConfig, _, err := c.PhysicalSealConfigs(ctx)
if err != nil {
return fmt.Errorf("failed to read existing seal configuration during migration: %v", err)
}
if existBarrierSealConfig.Type != c.migrationInfo.seal.BarrierType() {
// If the existing barrier type is not the same as the type of seal we are
// migrating from, the migration has already been performed.
c.logger.Info("migration is already performed since existing seal type and source seal types are different")
c.migrationInfo = nil
atomic.StoreUint32(c.sealMigrated, 1)
return nil
}
c.logger.Info("seal migration initiated")
switch {
case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
c.logger.Info("migrating from one auto-unseal to another", "from", c.migrationInfo.seal.BarrierType(), "to", c.seal.BarrierType())
// Set the recovery and barrier keys to be the same.
recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx)
if err != nil {
return errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err)
}
if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil {
return errwrap.Wrapf("error setting new recovery key information during migrate: {{err}}", err)
}
barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx)
if err != nil {
return errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err)
}
if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
}
case c.migrationInfo.seal.RecoveryKeySupported():
c.logger.Info("migrating from one auto-unseal to shamir", "from", c.migrationInfo.seal.BarrierType())
// Auto to Shamir, since recovery key isn't supported on new seal
// In this case we have to ensure that the recovery information was
// set properly.
if c.migrationInfo.recoveryKey == nil {
return errors.New("did not get expected recovery information to set new seal during migration")
}
// We have recovery keys; we're going to use them as the new
// shamir KeK.
err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(c.migrationInfo.recoveryKey)
if err != nil {
return errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
}
if err := c.seal.SetStoredKeys(ctx, [][]byte{c.migrationInfo.masterKey}); err != nil {
return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
}
case c.seal.RecoveryKeySupported():
c.logger.Info("migrating from shamir to auto-unseal", "to", c.seal.BarrierType())
// Migration is happening from shamir -> auto. In this case use the shamir
// combined key that was used to store the master key as the new recovery key.
if err := c.seal.SetRecoveryKey(ctx, c.migrationInfo.shamirCombinedKey); err != nil {
return errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
}
// Generate a new master key
newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader)
if err != nil {
return errwrap.Wrapf("error generating new master key: {{err}}", err)
}
// Rekey the barrier
if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
return errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
}
// Store the new master key
if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
return errwrap.Wrapf("error storing new master key: {{err}}", err)
}
default:
return errors.New("unhandled migration case (shamir to shamir)")
}
// At this point we've swapped things around and need to ensure we
// don't migrate again
c.migrationInfo = nil
atomic.StoreUint32(c.sealMigrated, 1)
// Ensure we populate the new values
bc, err := c.seal.BarrierConfig(ctx)
if err != nil {
return errwrap.Wrapf("error fetching barrier config after migration: {{err}}", err)
}
if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
return errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
}
if c.seal.RecoveryKeySupported() {
rc, err := c.seal.RecoveryConfig(ctx)
if err != nil {
return errwrap.Wrapf("error fetching recovery config after migration: {{err}}", err)
}
if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
return errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
}
} else if err := c.physical.Delete(ctx, recoverySealConfigPlaintextPath); err != nil {
return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err)
}
c.logger.Info("seal migration complete")
return nil
}
// unsealInternal takes in the master key and attempts to unseal the barrier.
// N.B.: This must be called with the state write lock held.
func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
@@ -1388,6 +1427,13 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
return false, err
}
if err := c.migrateSeal(ctx); err != nil {
c.logger.Error("seal migration error", "error", err)
c.barrier.Seal()
c.logger.Warn("vault is sealed")
return false, err
}
ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil))
if err := c.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil {
c.logger.Error("post-unseal setup failed", "error", err)
@@ -1396,6 +1442,11 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
return false, err
}
// Force a cache bust here, which will also run migration code
if c.seal.RecoveryKeySupported() {
c.seal.SetRecoveryConfig(ctx, nil)
}
c.standby = false
} else {
// Go to standby mode, wait until we are active to unseal
@@ -1405,11 +1456,6 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh)
}
// Force a cache bust here, which will also run migration code
if c.seal.RecoveryKeySupported() {
c.seal.SetRecoveryConfig(ctx, nil)
}
// Success!
atomic.StoreUint32(c.sealed, 0)
@@ -2125,11 +2171,13 @@ func (c *Core) SetSealsForMigration(migrationSeal, newSeal, unwrapSeal Seal) {
c.unwrapSeal.SetCore(c)
}
if newSeal != nil && migrationSeal != nil {
c.migrationSeal = migrationSeal
c.migrationSeal.SetCore(c)
c.migrationInfo = &migrationInformation{
seal: migrationSeal,
}
c.migrationInfo.seal.SetCore(c)
c.seal = newSeal
c.seal.SetCore(c)
c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationSeal.BarrierType(), "to_barrier_type", c.seal.BarrierType())
c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType())
c.initSealsForMigration()
}
}
@@ -2188,7 +2236,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([]
func (c *Core) IsInSealMigration() bool {
c.stateLock.RLock()
defer c.stateLock.RUnlock()
return c.migrationSeal != nil
return c.migrationInfo != nil
}
func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {


@@ -462,6 +462,18 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop
c.activeContext = activeCtx
c.activeContextCancelFunc.Store(activeCtxCancel)
// Perform seal migration
if err := c.migrateSeal(c.activeContext); err != nil {
c.logger.Error("seal migration error", "error", err)
c.barrier.Seal()
c.logger.Warn("vault is sealed")
c.heldHALock = nil
lock.Unlock()
close(continueCh)
c.stateLock.Unlock()
return
}
// This block is used to wipe barrier/seal state and verify that
// everything is sane. If we have no sanity in the barrier, we actually
// seal, as there's little we can do.


@@ -894,7 +894,13 @@ func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
}
func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
for _, key := range c.BarrierKeys {
var keys [][]byte
if core.seal.RecoveryKeySupported() {
keys = c.RecoveryKeys
} else {
keys = c.BarrierKeys
}
for _, key := range keys {
if _, err := core.Core.Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
}
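With this change UnsealCore selects recovery keys when the core's seal supports them and barrier keys otherwise, so the same helper works before and after a migration to an auto-seal. A hedged usage sketch from inside a test, assuming cluster.RecoveryKeys was populated at init time as the migration tests above do:

// Seal every core, then unseal the first one through the helper; with an
// auto-seal in place the helper now feeds cluster.RecoveryKeys.
cluster.EnsureCoresSealed(t)
cluster.UnsealCore(t, cluster.Cores[0])
testhelpers.WaitForActiveNode(t, cluster)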