open-vault/command/seal_migration_test.go

// +build !enterprise

package command

import (
	"context"
	"encoding/base64"
	"testing"

	"github.com/hashicorp/go-hclog"
	wrapping "github.com/hashicorp/go-kms-wrapping"
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/testhelpers"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/physical"
	physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
	"github.com/hashicorp/vault/shamir"
	"github.com/hashicorp/vault/vault"
	"github.com/hashicorp/vault/vault/seal"
)
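
// TestSealMigrationAutoToShamir brings up a single-node cluster with an
// auto-seal, initializes it, and then migrates the cluster to a Shamir seal,
// verifying that the recovery keys become the new unseal keys.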
func TestSealMigrationAutoToShamir(t *testing.T) {
	logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())

	phys, err := physInmem.NewInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	haPhys, err := physInmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	autoSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
		Seal:            autoSeal,
		Physical:        phys,
		HAPhysical:      haPhys.(physical.HABackend),
		DisableSealWrap: true,
	}, &vault.TestClusterOptions{
		Logger:      logger,
		HandlerFunc: vaulthttp.Handler,
		SkipInit:    true,
		NumCores:    1,
	})
	cluster.Start()
	defer cluster.Cleanup()

	client := cluster.Cores[0].Client
	initResp, err := client.Sys().Init(&api.InitRequest{
		RecoveryShares:    5,
		RecoveryThreshold: 3,
	})
	if err != nil {
		t.Fatal(err)
	}
	testhelpers.WaitForActiveNode(t, cluster)

	keys := initResp.RecoveryKeysB64
	rootToken := initResp.RootToken
	core := cluster.Cores[0].Core

	client.SetToken(rootToken)
	if err := client.Sys().Seal(); err != nil {
		t.Fatal(err)
	}
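
	// Build the Shamir seal that will replace the auto-seal and point the core
	// at it; adjustCoreForSealMigration is what switches the core into
	// migration mode from the existing auto-seal to the new Shamir seal.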
	shamirSeal := vault.NewDefaultSeal(&seal.Access{
		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
			Logger: logger.Named("shamir"),
		}),
	})
	shamirSeal.SetCore(core)

	if err := adjustCoreForSealMigration(logger, core, shamirSeal, autoSeal); err != nil {
		t.Fatal(err)
	}
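
	// While the migration is pending, every recovery key must be submitted with
	// the migrate flag; submitting one without it is expected to fail.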
	var resp *api.SealStatusResponse
	unsealOpts := &api.UnsealOpts{}
	for _, key := range keys {
		unsealOpts.Key = key
		unsealOpts.Migrate = false
		resp, err = client.Sys().UnsealWithOptions(unsealOpts)
		if err == nil {
			t.Fatal("expected error due to lack of migrate parameter")
		}

		unsealOpts.Migrate = true
		resp, err = client.Sys().UnsealWithOptions(unsealOpts)
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if !resp.Sealed {
			break
		}
	}
	if resp.Sealed {
		t.Fatalf("expected unsealed state; got %#v", *resp)
	}
	// Seal and unseal again to verify that things are working fine
	if err := client.Sys().Seal(); err != nil {
		t.Fatal(err)
	}

	unsealOpts.Migrate = false
	for _, key := range keys {
		unsealOpts.Key = key
		resp, err = client.Sys().UnsealWithOptions(unsealOpts)
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if !resp.Sealed {
			break
		}
	}
	if resp.Sealed {
		t.Fatalf("expected unsealed state; got %#v", *resp)
	}
}
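
// TestSealMigration walks a single-node cluster through a chain of seal
// migrations: Shamir to auto-seal, auto-seal to a second auto-seal, and back
// to Shamir, checking the barrier and recovery seal configs along the way.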
func TestSealMigration(t *testing.T) {
	logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
	phys, err := physInmem.NewInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	haPhys, err := physInmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	wrapper := vault.NewDefaultSeal(&seal.Access{
		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
			Logger: logger.Named("shamir"),
		}),
	})
	coreConfig := &vault.CoreConfig{
		Seal:            wrapper,
		Physical:        phys,
		HAPhysical:      haPhys.(physical.HABackend),
		DisableSealWrap: true,
	}
	clusterConfig := &vault.TestClusterOptions{
		Logger:      logger,
		HandlerFunc: vaulthttp.Handler,
		SkipInit:    true,
		NumCores:    1,
	}
	ctx := context.Background()
	var keys []string
	var rootToken string

	{
		logger.Info("integ: start up as normal with shamir seal, init it")
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		client := cluster.Cores[0].Client
		coreConfig = cluster.Cores[0].CoreConfig

		// Init
		resp, err := client.Sys().Init(&api.InitRequest{
			SecretShares:    2,
			SecretThreshold: 2,
		})
		if err != nil {
			t.Fatal(err)
		}
		keys = resp.KeysB64
		rootToken = resp.RootToken

		// Now seal
		cluster.Cleanup()
		// This will prevent cleanup from running again on the defer
		cluster.Cores = nil
	}

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: start up as normal with shamir seal and unseal, make sure everything is normal")
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		var resp *api.SealStatusResponse
		for _, key := range keys {
			resp, err = client.Sys().Unseal(key)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatal("expected unsealed state")
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}
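
	// Hold on to the auto-seal across blocks so the later stages can reuse it
	// as the cluster's configured seal and inspect its seal configuration.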
	var autoSeal vault.Seal

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: creating an autoseal and activating migration")
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		core := cluster.Cores[0].Core
		newSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
		newSeal.SetCore(core)
		autoSeal = newSeal
		if err := adjustCoreForSealMigration(logger, core, newSeal, nil); err != nil {
			t.Fatal(err)
		}

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)
		var resp *api.SealStatusResponse
		unsealOpts := &api.UnsealOpts{}
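
		// As in the auto-to-Shamir test above, each unseal key must be passed
		// with the migrate flag while a seal migration is pending; without it
		// the unseal call is expected to fail.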
		for _, key := range keys {
			unsealOpts.Key = key
			unsealOpts.Migrate = false
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err == nil {
				t.Fatal("expected error due to lack of migrate parameter")
			}

			unsealOpts.Migrate = true
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		// Make sure the seal configs were updated correctly
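		// After migrating to an auto-seal, the barrier config should describe a
		// single stored share, while the original 2/2 Shamir parameters move to
		// the recovery config.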
		b, err := autoSeal.BarrierConfig(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		if b.Type != autoSeal.BarrierType() {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretShares != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretThreshold != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.StoredShares != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}

		r, err := autoSeal.RecoveryConfig(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		if r.Type != wrapping.Shamir {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.SecretShares != 2 {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.SecretThreshold != 2 {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.StoredShares != 0 {
			t.Fatalf("bad seal config: %#v", r)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: verify autoseal and recovery key usage")
		coreConfig.Seal = autoSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		core := cluster.Cores[0].Core
		client := cluster.Cores[0].Client
		client.SetToken(rootToken)
		if err := core.UnsealWithStoredKeys(ctx); err != nil {
			t.Fatal(err)
		}
		resp, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}
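
		// Recombine the original Shamir key shares and confirm that the core
		// still accepts the result as the recovery key.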
		keyParts := [][]byte{}
		for _, key := range keys {
			raw, err := base64.StdEncoding.DecodeString(key)
			if err != nil {
				t.Fatal(err)
			}
			keyParts = append(keyParts, raw)
		}
		recoveredKey, err := shamir.Combine(keyParts)
		if err != nil {
			t.Fatal(err)
		}
		sealAccess := core.SealAccess()
		if err := sealAccess.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
			t.Fatal(err)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	// We should see stored barrier keys; after the sixth test, we shouldn't
	if entry, err := phys.Get(ctx, vault.StoredBarrierKeysPath); err != nil || entry == nil {
		t.Fatalf("expected nil error and non-nil entry, got error %#v and entry %#v", err, entry)
	}
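
	// Create a second auto-seal with a distinct type name so the next stage can
	// exercise migrating from one auto-seal to another.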
	altTestSeal := seal.NewTestSeal(nil)
	altTestSeal.SetType("test-alternate")
	altSeal := vault.NewAutoSeal(altTestSeal)

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: migrate from auto-seal to auto-seal")
		coreConfig.Seal = autoSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		core := cluster.Cores[0].Core
		if err := adjustCoreForSealMigration(logger, core, altSeal, autoSeal); err != nil {
			t.Fatal(err)
		}

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)
		var resp *api.SealStatusResponse
		unsealOpts := &api.UnsealOpts{}
		for _, key := range keys {
			unsealOpts.Key = key
			unsealOpts.Migrate = true
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		// Make sure the seal configs were updated correctly
		b, err := altSeal.BarrierConfig(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		if b.Type != altSeal.BarrierType() {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretShares != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretThreshold != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.StoredShares != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}

		r, err := altSeal.RecoveryConfig(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		if r.Type != wrapping.Shamir {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.SecretShares != 2 {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.SecretThreshold != 2 {
			t.Fatalf("bad seal config: %#v", r)
		}
		if r.StoredShares != 0 {
			t.Fatalf("bad seal config: %#v", r)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: create a Shamir seal and activate migration; verify it doesn't work if migrate isn't set.")
		coreConfig.Seal = altSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		core := cluster.Cores[0].Core

		wrapper := vault.NewDefaultSeal(&seal.Access{
			Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
				Logger: logger.Named("shamir"),
			}),
		})
		if err := adjustCoreForSealMigration(logger, core, wrapper, altSeal); err != nil {
			t.Fatal(err)
		}

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)
		var resp *api.SealStatusResponse
		unsealOpts := &api.UnsealOpts{}
		for _, key := range keys {
			unsealOpts.Key = key
			unsealOpts.Migrate = false
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err == nil {
				t.Fatal("expected error due to lack of migrate parameter")
			}

			unsealOpts.Migrate = true
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		// Make sure the seal configs were updated correctly
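		// After migrating back to Shamir, the barrier config should carry the
		// original 2/2 key shares (with the barrier key stored), and there
		// should no longer be a recovery config to read.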
		b, err := wrapper.BarrierConfig(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		if b.Type != wrapping.Shamir {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretShares != 2 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.SecretThreshold != 2 {
			t.Fatalf("bad seal config: %#v", b)
		}
		if b.StoredShares != 1 {
			t.Fatalf("bad seal config: %#v", b)
		}

		_, err = wrapper.RecoveryConfig(context.Background())
		if err == nil {
			t.Fatal("expected error")
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	{
		logger.SetLevel(hclog.Trace)
		logger.Info("integ: verify autoseal is off and the expected key shares work")
		coreConfig.Seal = wrapper
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()
		core := cluster.Cores[0].Core
		client := cluster.Cores[0].Client
		client.SetToken(rootToken)
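
		// With the Shamir seal active again, unsealing via stored keys should
		// not bring the core up; it is expected to stay sealed until the
		// original key shares are supplied below.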
		if err := core.UnsealWithStoredKeys(ctx); err != nil {
			t.Fatal(err)
		}
		resp, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if !resp.Sealed {
			t.Fatalf("expected sealed state; got %#v", *resp)
		}

		for _, key := range keys {
			resp, err = client.Sys().Unseal(key)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatal("expected unsealed state")
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}
}