Use Shamir as KeK when migrating from auto-seal to shamir (#8172)
* Use Shamir as KeK when migrating from auto-seal to shamir
* Use the correct number of shares/threshold for the migrated seal.
* Fix log message
* Add WaitForActiveNode to test
* Make test fail
* Minor updates
* Test with more shares and a threshold
* Add seal/unseal step to the test
* Update the logic that prepares seal migration (#8187)
* Update the logic that preps seal migration
* Add test and update recovery logic

Co-authored-by: ncabatoff <nick.cabatoff@gmail.com>
Co-authored-by: Brian Kassouf <briankassouf@users.noreply.github.com>
This commit is contained in:
parent
004e3e5cd2
commit
fb93b0bf9c
|
@ -7,19 +7,125 @@ import (
|
|||
"encoding/base64"
|
||||
"testing"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers"
|
||||
"github.com/hashicorp/vault/shamir"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
wrapping "github.com/hashicorp/go-kms-wrapping"
|
||||
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
|
||||
"github.com/hashicorp/vault/api"
|
||||
vaulthttp "github.com/hashicorp/vault/http"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
|
||||
"github.com/hashicorp/vault/shamir"
|
||||
"github.com/hashicorp/vault/vault"
|
||||
"github.com/hashicorp/vault/vault/seal"
|
||||
)
|
||||
|
||||
func TestSealMigrationAutoToShamir(t *testing.T) {
|
||||
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
|
||||
phys, err := physInmem.NewInmem(nil, logger)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
haPhys, err := physInmem.NewInmemHA(nil, logger)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
autoSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
|
||||
cluster := vault.NewTestCluster(t, &vault.CoreConfig{
|
||||
Seal: autoSeal,
|
||||
Physical: phys,
|
||||
HAPhysical: haPhys.(physical.HABackend),
|
||||
DisableSealWrap: true,
|
||||
}, &vault.TestClusterOptions{
|
||||
Logger: logger,
|
||||
HandlerFunc: vaulthttp.Handler,
|
||||
SkipInit: true,
|
||||
NumCores: 1,
|
||||
})
|
||||
cluster.Start()
|
||||
defer cluster.Cleanup()
|
||||
|
||||
client := cluster.Cores[0].Client
|
||||
initResp, err := client.Sys().Init(&api.InitRequest{
|
||||
RecoveryShares: 5,
|
||||
RecoveryThreshold: 3,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testhelpers.WaitForActiveNode(t, cluster)
|
||||
|
||||
keys := initResp.RecoveryKeysB64
|
||||
rootToken := initResp.RootToken
|
||||
core := cluster.Cores[0].Core
|
||||
|
||||
client.SetToken(rootToken)
|
||||
if err := client.Sys().Seal(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
shamirSeal := vault.NewDefaultSeal(&seal.Access{
|
||||
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
|
||||
Logger: logger.Named("shamir"),
|
||||
}),
|
||||
})
|
||||
shamirSeal.SetCore(core)
|
||||
|
||||
if err := adjustCoreForSealMigration(logger, core, shamirSeal, autoSeal); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var resp *api.SealStatusResponse
|
||||
unsealOpts := &api.UnsealOpts{}
|
||||
for _, key := range keys {
|
||||
unsealOpts.Key = key
|
||||
unsealOpts.Migrate = false
|
||||
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
|
||||
if err == nil {
|
||||
t.Fatal("expected error due to lack of migrate parameter")
|
||||
}
|
||||
unsealOpts.Migrate = true
|
||||
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("expected response")
|
||||
}
|
||||
if !resp.Sealed {
|
||||
break
|
||||
}
|
||||
}
|
||||
if resp.Sealed {
|
||||
t.Fatalf("expected unsealed state; got %#v", *resp)
|
||||
}
|
||||
|
||||
// Seal and unseal again to verify that things are working fine
|
||||
if err := client.Sys().Seal(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
unsealOpts.Migrate = false
|
||||
for _, key := range keys {
|
||||
unsealOpts.Key = key
|
||||
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("expected response")
|
||||
}
|
||||
if !resp.Sealed {
|
||||
break
|
||||
}
|
||||
}
|
||||
if resp.Sealed {
|
||||
t.Fatalf("expected unsealed state; got %#v", *resp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSealMigration(t *testing.T) {
|
||||
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
|
||||
phys, err := physInmem.NewInmem(nil, logger)
|
||||
|
@ -149,6 +255,41 @@ func TestSealMigration(t *testing.T) {
|
|||
t.Fatalf("expected unsealed state; got %#v", *resp)
|
||||
}
|
||||
|
||||
// Make sure the seal configs were updated correctly
|
||||
b, err := autoSeal.BarrierConfig(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.Type != autoSeal.BarrierType() {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretShares != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretThreshold != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.StoredShares != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
|
||||
r, err := autoSeal.RecoveryConfig(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if r.Type != wrapping.Shamir {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.SecretShares != 2 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.SecretThreshold != 2 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.StoredShares != 0 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
|
||||
cluster.Cleanup()
|
||||
cluster.Cores = nil
|
||||
}
|
||||
|
@ -243,6 +384,41 @@ func TestSealMigration(t *testing.T) {
|
|||
t.Fatalf("expected unsealed state; got %#v", *resp)
|
||||
}
|
||||
|
||||
// Make sure the seal configs were updated correctly
|
||||
b, err := altSeal.BarrierConfig(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.Type != altSeal.BarrierType() {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretShares != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretThreshold != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.StoredShares != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
|
||||
r, err := altSeal.RecoveryConfig(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if r.Type != wrapping.Shamir {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.SecretShares != 2 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.SecretThreshold != 2 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
if r.StoredShares != 0 {
|
||||
t.Fatalf("bad seal config: %#v", r)
|
||||
}
|
||||
|
||||
cluster.Cleanup()
|
||||
cluster.Cores = nil
|
||||
}
|
||||
|
@ -257,6 +433,12 @@ func TestSealMigration(t *testing.T) {
|
|||
|
||||
core := cluster.Cores[0].Core
|
||||
|
||||
wrapper := vault.NewDefaultSeal(&seal.Access{
|
||||
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
|
||||
Logger: logger.Named("shamir"),
|
||||
}),
|
||||
})
|
||||
|
||||
if err := adjustCoreForSealMigration(logger, core, wrapper, altSeal); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -286,6 +468,29 @@ func TestSealMigration(t *testing.T) {
|
|||
t.Fatalf("expected unsealed state; got %#v", *resp)
|
||||
}
|
||||
|
||||
// Make sure the seal configs were updated correctly
|
||||
b, err := wrapper.BarrierConfig(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if b.Type != wrapping.Shamir {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretShares != 2 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.SecretThreshold != 2 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
if b.StoredShares != 1 {
|
||||
t.Fatalf("bad seal config: %#v", b)
|
||||
}
|
||||
|
||||
_, err = wrapper.RecoveryConfig(context.Background())
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
cluster.Cleanup()
|
||||
cluster.Cores = nil
|
||||
}
|
||||
|
|
|
@ -54,9 +54,6 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
|
|||
}
|
||||
}
|
||||
|
||||
var existSeal vault.Seal
|
||||
var newSeal vault.Seal
|
||||
|
||||
if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
|
||||
// In this case our migration seal is set so we are using it
|
||||
// (potentially) for unwrapping. Set it on core for that purpose then
|
||||
|
@ -69,15 +66,49 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
|
|||
return errors.New(`Recovery seal configuration not found for existing seal`)
|
||||
}
|
||||
|
||||
if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir {
|
||||
return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise")
|
||||
}
|
||||
|
||||
var migrationSeal vault.Seal
|
||||
var newSeal vault.Seal
|
||||
|
||||
// Determine the migrationSeal. This is either going to be an instance of
|
||||
// shamir or the unwrapSeal.
|
||||
switch existBarrierSealConfig.Type {
|
||||
case wrapping.Shamir:
|
||||
// The value reflected in config is what we're going to
|
||||
existSeal = vault.NewDefaultSeal(&vaultseal.Access{
|
||||
migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
|
||||
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
|
||||
Logger: logger.Named("shamir"),
|
||||
}),
|
||||
})
|
||||
newSeal = barrierSeal
|
||||
|
||||
default:
|
||||
// If we're not coming from Shamir we expect the previous seal to be
|
||||
// in the config and disabled.
|
||||
migrationSeal = unwrapSeal
|
||||
}
|
||||
|
||||
// newSeal will be the barrierSeal
|
||||
newSeal = barrierSeal
|
||||
|
||||
// Set the appropriate barrier and recovery configs.
|
||||
switch {
|
||||
case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
|
||||
// Migrating from auto->auto, copy the configs over
|
||||
newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
|
||||
newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
|
||||
case migrationSeal.RecoveryKeySupported():
|
||||
// Migrating from auto->shamir, clone auto's recovery config and set
|
||||
// stored keys to 1.
|
||||
newSealConfig := existRecoverySealConfig.Clone()
|
||||
newSealConfig.StoredShares = 1
|
||||
newSeal.SetCachedBarrierConfig(newSealConfig)
|
||||
case newSeal.RecoveryKeySupported():
|
||||
// Migrating from shamir->auto, set a new barrier config and set
|
||||
// recovery config to a clone of shamir's barrier config with stored
|
||||
// keys set to 0.
|
||||
newBarrierSealConfig := &vault.SealConfig{
|
||||
Type: newSeal.BarrierType(),
|
||||
SecretShares: 1,
|
||||
|
@ -85,21 +116,13 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
|
|||
StoredShares: 1,
|
||||
}
|
||||
newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
|
||||
newSeal.SetCachedRecoveryConfig(existBarrierSealConfig)
|
||||
|
||||
default:
|
||||
if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir {
|
||||
return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise")
|
||||
}
|
||||
|
||||
// If we're not coming from Shamir we expect the previous seal to be
|
||||
// in the config and disabled.
|
||||
existSeal = unwrapSeal
|
||||
newSeal = barrierSeal
|
||||
newSeal.SetCachedBarrierConfig(existRecoverySealConfig)
|
||||
newRecoveryConfig := existBarrierSealConfig.Clone()
|
||||
newRecoveryConfig.StoredShares = 0
|
||||
newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
|
||||
}
|
||||
|
||||
core.SetSealsForMigration(existSeal, newSeal, unwrapSeal)
|
||||
core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -495,7 +495,7 @@ type Core struct {
|
|||
recoveryMode bool
|
||||
|
||||
clusterNetworkLayer cluster.NetworkLayer
|
||||
|
||||
|
||||
// PR1103disabled is used to test upgrade workflows: when set to true,
|
||||
// the correct behaviour for namespaced cubbyholes is disabled, so we
|
||||
// can test an upgrade to a version that includes the fixes from
|
||||
|
|
Loading…
Reference in New Issue