Same seal migration oss (#10224)

* Refactoring and test improvements.
* Support migrating from a given type of autoseal to that same type but with different parameters.

parent 071f651a5a
commit 0d6a929a4c
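Same-type migration is driven by the usual two-stanza seal configuration. The sketch below is illustrative only (it is not part of this commit) and shows, as a Go string constant, what such a config could look like when both stanzas are transit seals that differ only in parameters; the key names are made up.

package main

import "fmt"

// exampleConfig is a hypothetical server config fragment for a same-type seal
// migration: both stanzas are "transit", the old one is marked disabled, and
// they differ only in parameters such as key_name.
const exampleConfig = `
seal "transit" {
  key_name = "old-seal-key"
  disabled = "true"
}

seal "transit" {
  key_name = "new-seal-key"
}
`

func main() {
	fmt.Print(exampleConfig)
}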
@@ -1100,7 +1100,9 @@ func (c *ServerCommand) Run(args []string) int {
Logger: c.logger.Named("shamir"),
}),
})
wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger)
var sealInfoKeys []string
var sealInfoMap = map[string]string{}
wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger)
if sealConfigError != nil {
if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
c.UI.Error(fmt.Sprintf(

@@ -1116,12 +1118,18 @@ func (c *ServerCommand) Run(args []string) int {
})
}

var infoPrefix = ""
if configSeal.Disabled {
unwrapSeal = seal
infoPrefix = "Old "
} else {
barrierSeal = seal
barrierWrapper = wrapper
}
for _, k := range sealInfoKeys {
infoKeys = append(infoKeys, infoPrefix+k)
info[infoPrefix+k] = sealInfoMap[k]
}

// Ensure that the seal finalizer is called, even if using verify-only
defer func() {

@@ -1570,7 +1578,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
// Vault cluster with multiple servers is configured with auto-unseal but is
// uninitialized. Once one server initializes the storage backend, this
// goroutine will pick up the unseal keys and unseal this instance.
if !core.IsInSealMigration() {
if !core.IsInSealMigrationMode() {
go func() {
for {
err := core.UnsealWithStoredKeys(context.Background())
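The server changes above collect each seal stanza's info keys into per-seal maps and then copy them into the global info, prefixing the keys with "Old " when the stanza is disabled. A minimal standalone sketch of that classification logic, with hypothetical types standing in for the real config structures:

package main

import "fmt"

// sealStanza stands in (hypothetically) for a parsed seal config block: its type,
// whether it is marked disabled, and the info entries produced while configuring it.
type sealStanza struct {
	Type     string
	Disabled bool
	Info     map[string]string
}

// mergeSealInfo mirrors the logic sketched in the hunk above: a disabled stanza is
// the seal being migrated away from, so its info keys get an "Old " prefix; an
// enabled stanza describes the barrier seal and keeps its keys unchanged.
func mergeSealInfo(stanzas []sealStanza) map[string]string {
	info := map[string]string{}
	for _, s := range stanzas {
		prefix := ""
		if s.Disabled {
			prefix = "Old "
		}
		for k, v := range s.Info {
			info[prefix+k] = v
		}
	}
	return info
}

func main() {
	fmt.Println(mergeSealInfo([]sealStanza{
		{Type: "transit", Disabled: true, Info: map[string]string{"Seal Type": "transit"}},
		{Type: "transit", Disabled: false, Info: map[string]string{"Seal Type": "transit"}},
	}))
}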
@@ -2,6 +2,7 @@ package sealhelper

import (
"path"
"strconv"

"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"

@@ -20,7 +21,7 @@ type TransitSealServer struct {
*vault.TestCluster
}

func NewTransitSealServer(t testing.T) *TransitSealServer {
func NewTransitSealServer(t testing.T, idx int) *TransitSealServer {
conf := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,

@@ -29,7 +30,7 @@ func NewTransitSealServer(t testing.T) *TransitSealServer {
opts := &vault.TestClusterOptions{
NumCores: 1,
HandlerFunc: http.Handler,
Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit"),
Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(idx)),
}
teststorage.InmemBackendSetup(conf, opts)
cluster := vault.NewTestCluster(t, conf, opts)
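With the new idx parameter, a test can stand up more than one transit seal server and tell their loggers apart. A rough usage sketch; the test name and the sealhelper import path are assumed rather than taken from this diff:

package sealmigration

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/sealhelper"
)

// TestTwoTransitSealServers is an illustrative sketch: two independent transit seal
// servers, distinguished by idx, each holding its own seal key, as needed for a
// transit-to-transit migration test.
func TestTwoTransitSealServers(t *testing.T) {
	tss1 := sealhelper.NewTransitSealServer(t, 0) // logger suffixed "transit-seal0"
	defer tss1.Cleanup()
	tss1.MakeKey(t, "transit-seal-key-1")

	tss2 := sealhelper.NewTransitSealServer(t, 1) // logger suffixed "transit-seal1"
	defer tss2.Cleanup()
	tss2.MakeKey(t, "transit-seal-key-2")
}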
@@ -101,20 +101,6 @@ func handleSysUnseal(core *vault.Core) http.Handler {
return
}

isInSealMigration := core.IsInSealMigration()
if !req.Migrate && isInSealMigration {
respondError(
w, http.StatusBadRequest,
errors.New("'migrate' parameter must be set true in JSON body when in seal migration mode"))
return
}
if req.Migrate && !isInSealMigration {
respondError(
w, http.StatusBadRequest,
errors.New("'migrate' parameter set true in JSON body when not in seal migration mode"))
return
}

if req.Key == "" {
respondError(
w, http.StatusBadRequest,

@@ -138,9 +124,10 @@ func handleSysUnseal(core *vault.Core) http.Handler {
}
}

// Attempt the unseal
if core.SealAccess().RecoveryKeySupported() {
_, err = core.UnsealWithRecoveryKeys(key)
// Attempt the unseal. If migrate was specified, the key should correspond
// to the old seal.
if req.Migrate {
_, err = core.UnsealMigrate(key)
} else {
_, err = core.Unseal(key)
}

@@ -231,7 +218,7 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req
Progress: progress,
Nonce: nonce,
Version: version.GetVersion().VersionNumber(),
Migration: core.IsInSealMigration(),
Migration: core.IsInSealMigrationMode() && !core.IsSealMigrated(),
ClusterName: clusterName,
ClusterID: clusterID,
RecoverySeal: core.SealAccess().RecoveryKeySupported(),
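From the client's point of view, the rule enforced above is symmetric: the seal-status endpoint reports whether a migration is pending, and unseal requests must set the migrate flag exactly when it is. A hedged sketch using the public api client (the surrounding setup and the key material are assumptions, not part of this diff):

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

// submitUnsealKey sends one unseal key share, setting Migrate to match whatever the
// seal-status endpoint reports, so the request is accepted both during and outside
// a seal migration.
func submitUnsealKey(client *api.Client, key []byte) error {
	status, err := client.Sys().SealStatus()
	if err != nil {
		return err
	}
	resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
		Key:     base64.StdEncoding.EncodeToString(key),
		Migrate: status.Migration,
	})
	if err != nil {
		return err
	}
	fmt.Printf("sealed=%v progress=%d\n", resp.Sealed, resp.Progress)
	return nil
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Key material comes from the operator; a placeholder is used here.
	if err := submitUnsealKey(client, []byte("unseal-key-share")); err != nil {
		log.Fatal(err)
	}
}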
vault/core.go (793 lines changed): diff suppressed because it is too large.
@@ -1,5 +1,3 @@
// +build !enterprise

package sealmigration

import (

@@ -32,23 +30,24 @@ func TestSealMigration_TransitToShamir_Pre14(t *testing.T) {
func testSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {

// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
tss := sealhelper.NewTransitSealServer(t, 0)
defer func() {
if tss != nil {
tss.Cleanup()
}
}()
tss.MakeKey(t, "transit-seal-key")
sealKeyName := "transit-seal-key"
tss.MakeKey(t, sealKeyName)

// Initialize the backend with transit.
cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
cluster, opts := initializeTransit(t, logger, storage, basePort, tss, sealKeyName)
rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys
cluster.EnsureCoresSealed(t)
cluster.Cleanup()
storage.Cleanup(t, cluster)

// Migrate the backend from transit to shamir
migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys)
migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, opts.SealFunc, rootToken, recoveryKeys)

// Now that migration is done, we can nuke the transit server, since we
// can unseal without it.

@@ -60,25 +59,20 @@ func testSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, s
runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
}

func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, transitSeal vault.Seal, rootToken string, recoveryKeys [][]byte) {
func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
tss *sealhelper.TransitSealServer, sealFunc func() vault.Seal, rootToken string, recoveryKeys [][]byte) {

var baseClusterPort = basePort + 10

var conf = vault.CoreConfig{
Logger: logger.Named("migrateFromTransitToShamir"),
// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
// equivalent of doing the following in HCL:
// seal "transit" {
//   // ...
//   disabled = "true"
// }
UnwrapSeal: transitSeal,
}
var conf vault.CoreConfig
var opts = vault.TestClusterOptions{
Logger: logger.Named("migrateFromTransitToShamir"),
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
UnwrapSealFunc: sealFunc,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)
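As the comments above note, supplying an unwrap seal is what puts a core into migration mode; in the refactored tests this now happens through TestClusterOptions rather than CoreConfig. A small illustrative helper, not part of the diff, that captures the pairing:

package sealmigration

import (
	vault "github.com/hashicorp/vault/vault"
)

// migrationClusterOptions is a hypothetical helper showing how the new options fit
// together: SealFunc builds the seal being migrated to, and UnwrapSealFunc builds
// the seal being migrated from -- the test-cluster analogue of marking the old
// stanza disabled = "true" in HCL. A SealFunc that returns nil means "migrate to shamir".
func migrationClusterOptions(newSeal, oldSeal func() vault.Seal) *vault.TestClusterOptions {
	return &vault.TestClusterOptions{
		SealFunc:       newSeal,
		UnwrapSealFunc: oldSeal,
		SkipInit:       true, // migrations run against existing, already-initialized storage
	}
}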
@@ -24,7 +24,7 @@ import (
)

const (
numTestCores = 5
numTestCores = 3
keyShares = 3
keyThreshold = 3

@@ -32,6 +32,7 @@ const (
basePort_TransitToShamir_Pre14 = 21000
basePort_ShamirToTransit_Post14 = 22000
basePort_TransitToShamir_Post14 = 23000
basePort_TransitToTransit = 24000
)

type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int)

@@ -52,7 +53,6 @@ func testVariousBackends(t *testing.T, tf testFunc, basePort int, includeRaft bo
t.Run("file", func(t *testing.T) {
t.Parallel()
t.Skip("fails intermittently")

logger := logger.Named("file")
storage, cleanup := teststorage.MakeReusableStorage(
@@ -103,31 +103,28 @@ func testSealMigrationShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, s
// Initialize the backend using shamir
cluster, _ := initializeShamir(t, logger, storage, basePort)
rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys
cluster.EnsureCoresSealed(t)
cluster.Cleanup()
storage.Cleanup(t, cluster)

// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
tss := sealhelper.NewTransitSealServer(t, 0)
defer func() {
tss.EnsureCoresSealed(t)
tss.Cleanup()
}()
tss.MakeKey(t, "transit-seal-key")
tss.MakeKey(t, "transit-seal-key-1")

// Migrate the backend from shamir to transit. Note that the barrier keys
// are now the recovery keys.
transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
sealFunc := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)

// Run the backend with transit.
runTransit(t, logger, storage, basePort, rootToken, transitSeal)
runAutoseal(t, logger, storage, basePort, rootToken, sealFunc)
}

func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) vault.Seal {
func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) func() vault.Seal {
var baseClusterPort = basePort + 10

var transitSeal vault.Seal

var conf = vault.CoreConfig{}
var opts = vault.TestClusterOptions{
Logger: logger.Named("migrateFromShamirToTransit"),

@@ -138,8 +135,7 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
SkipInit: true,
// N.B. Providing a transit seal puts us in migration mode.
SealFunc: func() vault.Seal {
transitSeal = tss.MakeSeal(t, "transit-seal-key")
return transitSeal
return tss.MakeSeal(t, "transit-seal-key")
},
}
storage.Setup(&conf, &opts)

@@ -159,6 +155,8 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
// Wait for migration to finish.
awaitMigration(t, leader.Client)

verifySealConfigTransit(t, leader)

// Read the secrets
secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
if err != nil {

@@ -176,17 +174,7 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
t.Fatal(err)
}

// Make sure the seal configs were updated correctly.
b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)

cluster.EnsureCoresSealed(t)

return transitSeal
return opts.SealFunc
}

// TestSealMigration_ShamirToTransit_Post14 tests shamir-to-transit seal
@@ -202,59 +190,25 @@ func testSealMigrationShamirToTransit_Post14(t *testing.T, logger hclog.Logger,
cluster, opts := initializeShamir(t, logger, storage, basePort)

// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
defer func() {
tss.EnsureCoresSealed(t)
tss.Cleanup()
}()
tss.MakeKey(t, "transit-seal-key")
tss := sealhelper.NewTransitSealServer(t, 0)
defer tss.Cleanup()
sealKeyName := "transit-seal-key-1"
tss.MakeKey(t, sealKeyName)

// Migrate the backend from shamir to transit.
transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts)
cluster.EnsureCoresSealed(t)

cluster.Cleanup()
storage.Cleanup(t, cluster)

// Run the backend with transit.
runTransit(t, logger, storage, basePort, cluster.RootToken, transitSeal)
}

func migrateFromShamirToTransit_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, cluster *vault.TestCluster, opts *vault.TestClusterOptions) vault.Seal {

// N.B. Providing a transit seal puts us in migration mode.
var transitSeal vault.Seal
opts.SealFunc = func() vault.Seal {
transitSeal = tss.MakeSeal(t, "transit-seal-key")
return transitSeal
}
modifyCoreConfig := func(tcc *vault.TestClusterCore) {
tcc.CoreConfig.Seal = transitSeal
return tss.MakeSeal(t, sealKeyName)
}

// Restart each follower with the new config, and migrate to Transit.
// Note that the barrier keys are being used as recovery keys.
leaderIdx := migratePost14(t, logger, storage, cluster, opts, cluster.BarrierKeys, modifyCoreConfig)
leader := cluster.Cores[leaderIdx]
leaderIdx := migratePost14(t, storage, cluster, opts, cluster.BarrierKeys)
validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
cluster.Cleanup()
storage.Cleanup(t, cluster)

// Read the secret
secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}

// Make sure the seal configs were updated correctly.
b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)

return transitSeal
// Run the backend with transit.
runAutoseal(t, logger, storage, basePort, cluster.RootToken, opts.SealFunc)
}

// TestSealMigration_TransitToShamir_Post14 tests transit-to-shamir seal
@@ -267,21 +221,25 @@ func TestSealMigration_TransitToShamir_Post14(t *testing.T) {

func testSealMigrationTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
// Create the transit server.
tss := sealhelper.NewTransitSealServer(t)
tss := sealhelper.NewTransitSealServer(t, 0)
defer func() {
if tss != nil {
tss.Cleanup()
}
}()
tss.MakeKey(t, "transit-seal-key")
sealKeyName := "transit-seal-key-1"
tss.MakeKey(t, sealKeyName)

// Initialize the backend with transit.
cluster, opts, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
cluster, opts := initializeTransit(t, logger, storage, basePort, tss, sealKeyName)
rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys

// Migrate the backend from transit to shamir
migrateFromTransitToShamir_Post14(t, logger, storage, basePort, tss, transitSeal, cluster, opts)
cluster.EnsureCoresSealed(t)
opts.UnwrapSealFunc = opts.SealFunc
opts.SealFunc = func() vault.Seal { return nil }
leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
validateMigration(t, storage, cluster, leaderIdx, verifySealConfigShamir)

cluster.Cleanup()
storage.Cleanup(t, cluster)
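The two assignments above are the whole trick for the post-1.4 transit-to-shamir path: the previous autoseal becomes the unwrap seal, and the new SealFunc deliberately returns nil so the restarted cores fall back to shamir. Restated as a tiny hedged helper (not part of the diff):

package sealmigration

import (
	vault "github.com/hashicorp/vault/vault"
)

// swapToShamir is an illustrative restatement of the idiom used above: keep the old
// autoseal around as the unwrap seal and make SealFunc return nil, which the test
// cluster treats as "use the default shamir seal" on the next restart.
func swapToShamir(opts *vault.TestClusterOptions) {
	opts.UnwrapSealFunc = opts.SealFunc
	opts.SealFunc = func() vault.Seal { return nil }
}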
@@ -295,27 +253,12 @@ func testSealMigrationTransitToShamir_Post14(t *testing.T, logger hclog.Logger,
runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
}

func migrateFromTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, transitSeal vault.Seal, cluster *vault.TestCluster, opts *vault.TestClusterOptions) {
func validateMigration(t *testing.T, storage teststorage.ReusableStorage,
cluster *vault.TestCluster, leaderIdx int, f func(t *testing.T, core *vault.TestClusterCore)) {
t.Helper()

opts.SealFunc = nil
modifyCoreConfig := func(tcc *vault.TestClusterCore) {
// Nil out the seal so it will be initialized as shamir.
tcc.CoreConfig.Seal = nil

// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
// equivalent of doing the following in HCL:
// seal "transit" {
//   // ...
//   disabled = "true"
// }
tcc.CoreConfig.UnwrapSeal = transitSeal
}

// Restart each follower with the new config, and migrate to Shamir.
leaderIdx := migratePost14(t, logger, storage, cluster, opts, cluster.RecoveryKeys, modifyCoreConfig)
leader := cluster.Cores[leaderIdx]

// Read the secret
secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
if err != nil {
t.Fatal(err)
@@ -334,27 +277,70 @@ func migrateFromTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storag
testhelpers.WaitForRaftApply(t, core, appliedIndex)
}

// Make sure the seal configs were updated correctly.
b, r, err := core.Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
f(t, core)
}
}

func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, cluster *vault.TestCluster, opts *vault.TestClusterOptions, unsealKeys [][]byte, modifyCoreConfig func(*vault.TestClusterCore)) int {
// TestSealMigration_TransitToTransit tests transit-to-transit seal
// migration, using the post-1.4 method of bringing individual nodes in the
// cluster to do the migration.
func TestSealMigration_TransitToTransit(t *testing.T) {
testVariousBackends(t, testSealMigration_TransitToTransit, basePort_TransitToTransit, true)
}

func testSealMigration_TransitToTransit(t *testing.T, logger hclog.Logger,
storage teststorage.ReusableStorage, basePort int) {

// Create the transit server.
tss1 := sealhelper.NewTransitSealServer(t, 0)
defer func() {
if tss1 != nil {
tss1.Cleanup()
}
}()
sealKeyName := "transit-seal-key-1"
tss1.MakeKey(t, sealKeyName)

// Initialize the backend with transit.
cluster, opts := initializeTransit(t, logger, storage, basePort, tss1, sealKeyName)
rootToken := cluster.RootToken

// Create the transit server.
tss2 := sealhelper.NewTransitSealServer(t, 1)
defer func() {
tss2.Cleanup()
}()
tss2.MakeKey(t, "transit-seal-key-2")

// Migrate the backend from transit to transit.
opts.UnwrapSealFunc = opts.SealFunc
opts.SealFunc = func() vault.Seal {
return tss2.MakeSeal(t, "transit-seal-key-2")
}
leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
cluster.Cleanup()
storage.Cleanup(t, cluster)

// Now that migration is done, we can nuke the transit server, since we
// can unseal without it.
tss1.Cleanup()
tss1 = nil

// Run the backend with transit.
runAutoseal(t, logger, storage, basePort, rootToken, opts.SealFunc)
}

func migratePost14(t *testing.T, storage teststorage.ReusableStorage, cluster *vault.TestCluster,
opts *vault.TestClusterOptions, unsealKeys [][]byte) int {

cluster.Logger = cluster.Logger.Named("migration")
// Restart each follower with the new config, and migrate.
for i := 1; i < len(cluster.Cores); i++ {
cluster.StopCore(t, i)
if storage.IsRaft {
teststorage.CloseRaftStorage(t, cluster, i)
}
modifyCoreConfig(cluster.Cores[i])
cluster.StartCore(t, i, opts)

unsealMigrate(t, cluster.Cores[i].Client, unsealKeys, true)
@@ -385,7 +371,7 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
}
leader := cluster.Cores[leaderIdx]

// Wait for migration to occur on one of the 2 unsealed nodes
// Wait for migration to occur on the leader
awaitMigration(t, leader.Client)

var appliedIndex uint64

@@ -400,10 +386,9 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
teststorage.CloseRaftStorage(t, cluster, 0)
}

// Modify the core
modifyCoreConfig(cluster.Cores[0])

// Bring core 0 back up
// Bring core 0 back up; we still have the seal migration config in place,
// but now that migration has been performed we should be able to unseal
// with the new seal and without using the `migrate` unseal option.
cluster.StartCore(t, 0, opts)
unseal(t, cluster.Cores[0].Client, unsealKeys)
@@ -420,16 +405,16 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) {
t.Helper()
for i, key := range keys {
// Try to unseal with missing "migrate" parameter
_, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
Key: base64.StdEncoding.EncodeToString(key),
})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
if err := attemptUnseal(client, keys); err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
if err := attemptUnsealMigrate(client, keys, transitServerAvailable); err != nil {
t.Fatal(err)
}
}

// Unseal with "migrate" parameter
func attemptUnsealMigrate(client *api.Client, keys [][]byte, transitServerAvailable bool) error {
for i, key := range keys {
resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
Key: base64.StdEncoding.EncodeToString(key),
Migrate: true,

@@ -438,26 +423,27 @@ func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServe
if i < keyThreshold-1 {
// Not enough keys have been provided yet.
if err != nil {
t.Fatal(err)
return err
}
} else {
if transitServerAvailable {
// The transit server is running.
if err != nil {
t.Fatal(err)
return err
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
return fmt.Errorf("expected unsealed state; got %#v", resp)
}
} else {
// The transit server is stopped.
if err == nil {
t.Fatal("expected error due to transit server being stopped.")
return fmt.Errorf("expected error due to transit server being stopped.")
}
}
break
}
}
return nil
}

// awaitMigration waits for migration to finish.
@@ -484,6 +470,12 @@ func awaitMigration(t *testing.T, client *api.Client) {

func unseal(t *testing.T, client *api.Client, keys [][]byte) {
t.Helper()
if err := attemptUnseal(client, keys); err != nil {
t.Fatal(err)
}
}

func attemptUnseal(client *api.Client, keys [][]byte) error {
for i, key := range keys {

resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{

@@ -492,18 +484,41 @@ func unseal(t *testing.T, client *api.Client, keys [][]byte) {
if i < keyThreshold-1 {
// Not enough keys have been provided yet.
if err != nil {
t.Fatal(err)
return err
}
} else {
if err != nil {
t.Fatal(err)
return err
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
return fmt.Errorf("expected unsealed state; got %#v", resp)
}
break
}
}
return nil
}

func verifySealConfigShamir(t *testing.T, core *vault.TestClusterCore) {
t.Helper()
b, r, err := core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
if r != nil {
t.Fatal("should not have recovery config for shamir")
}
}

func verifySealConfigTransit(t *testing.T, core *vault.TestClusterCore) {
t.Helper()
b, r, err := core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
}

// verifyBarrierConfig verifies that a barrier configuration is correct.
@@ -554,7 +569,7 @@ func initializeShamir(t *testing.T, logger hclog.Logger, storage teststorage.Reu
} else {
cluster.UnsealCores(t)
}
testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

err := client.Sys().Mount("kv-wrapped", &api.MountInput{
SealWrap: true,

@@ -640,29 +655,25 @@ func runShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableSt
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}

// Seal the cluster
cluster.EnsureCoresSealed(t)
}

// initializeTransit initializes a brand new backend storage with Transit.
func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer) (*vault.TestCluster, *vault.TestClusterOptions, vault.Seal) {
func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
tss *sealhelper.TransitSealServer, sealKeyName string) (*vault.TestCluster, *vault.TestClusterOptions) {
t.Helper()
var transitSeal vault.Seal

var baseClusterPort = basePort + 10

// Start the cluster
var conf = vault.CoreConfig{}
var opts = vault.TestClusterOptions{
Logger: logger,
Logger: logger.Named("initializeTransit"),
HandlerFunc: vaulthttp.Handler,
NumCores: numTestCores,
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SealFunc: func() vault.Seal {
transitSeal = tss.MakeSeal(t, "transit-seal-key")
return transitSeal
return tss.MakeSeal(t, sealKeyName)
},
}
storage.Setup(&conf, &opts)
@@ -698,16 +709,15 @@ func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.Re
t.Fatal(err)
}

return cluster, &opts, transitSeal
return cluster, &opts
}

func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, transitSeal vault.Seal) {
func runAutoseal(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, sealFunc func() vault.Seal) {

var baseClusterPort = basePort + 10

// Start the cluster
var conf = vault.CoreConfig{
Seal: transitSeal,
}
var conf = vault.CoreConfig{}
var opts = vault.TestClusterOptions{
Logger: logger.Named("runTransit"),
HandlerFunc: vaulthttp.Handler,

@@ -715,6 +725,7 @@ func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableS
BaseListenAddress: fmt.Sprintf("127.0.0.1:%d", basePort),
BaseClusterListenPort: baseClusterPort,
SkipInit: true,
SealFunc: sealFunc,
}
storage.Setup(&conf, &opts)
cluster := vault.NewTestCluster(t, &conf, &opts)

@@ -771,9 +782,6 @@ func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableS
if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
t.Fatal(diff)
}

// Seal the cluster
cluster.EnsureCoresSealed(t)
}

// joinRaftFollowers unseals the leader, and then joins-and-unseals the
@@ -39,7 +39,7 @@ type GenerateRootStrategy interface {
type generateStandardRootToken struct{}

func (g generateStandardRootToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error {
masterKey, err := c.unsealKeyToMasterKey(ctx, combinedKey)
masterKey, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey)
if err != nil {
return errwrap.Wrapf("unable to authenticate: {{err}}", err)
}

@@ -21,7 +21,7 @@ type generateRecoveryToken struct {
}

func (g *generateRecoveryToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error {
key, err := c.unsealKeyToMasterKey(ctx, combinedKey)
key, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey)
if err != nil {
return errwrap.Wrapf("unable to authenticate: {{err}}", err)
}
@@ -413,10 +413,13 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
}

// Disallow auto-unsealing when migrating
if c.IsInSealMigration() {
if c.IsInSealMigrationMode() && !c.IsSealMigrated() {
return NewNonFatalError(errors.New("cannot auto-unseal during seal migration"))
}

c.stateLock.Lock()
defer c.stateLock.Unlock()

sealed := c.Sealed()
if !sealed {
c.Logger().Warn("attempted unseal with stored keys, but vault is already unsealed")

@@ -434,27 +437,22 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
if len(keys) == 0 {
return NewNonFatalError(errors.New("stored unseal keys are supported, but none were found"))
}

unsealed := false
keysUsed := 0
for _, key := range keys {
unsealed, err = c.Unseal(key)
if err != nil {
return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err))
}
keysUsed++
if unsealed {
break
}
if len(keys) != 1 {
return NewNonFatalError(errors.New("expected exactly one stored key"))
}

if !unsealed {
err = c.unsealInternal(ctx, keys[0])
if err != nil {
return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err))
}

if c.Sealed() {
// This most likely means that the user configured Vault to only store a
// subset of the required threshold of keys. We still consider this a
// "success", since trying again would yield the same result.
c.Logger().Warn("vault still sealed after using stored unseal keys", "stored_keys_used", keysUsed)
c.Logger().Warn("vault still sealed after using stored unseal key")
} else {
c.Logger().Info("unsealed with stored keys", "stored_keys_used", keysUsed)
c.Logger().Info("unsealed with stored key")
}

return nil
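With an autoseal, the "stored keys" entry holds exactly one value, the barrier key encrypted by the seal, which is why the old multi-key loop gives way to a strict single-key check above. A self-contained sketch of that invariant, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// pickStoredKey mirrors the stricter check introduced in the hunk above: anything
// other than exactly one stored key is treated as an error.
func pickStoredKey(keys [][]byte) ([]byte, error) {
	if len(keys) == 0 {
		return nil, errors.New("stored unseal keys are supported, but none were found")
	}
	if len(keys) != 1 {
		return nil, errors.New("expected exactly one stored key")
	}
	return keys[0], nil
}

func main() {
	key, err := pickStoredKey([][]byte{[]byte("barrier-key-material")})
	fmt.Println(len(key), err)
}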
@@ -5,6 +5,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/physical"

@@ -401,6 +402,36 @@ func (s *SealConfig) Clone() *SealConfig {
return ret
}

type ErrEncrypt struct {
Err error
}

var _ error = &ErrEncrypt{}

func (e *ErrEncrypt) Error() string {
return e.Err.Error()
}

func (e *ErrEncrypt) Is(target error) bool {
_, ok := target.(*ErrEncrypt)
return ok || errors.Is(e.Err, target)
}

type ErrDecrypt struct {
Err error
}

var _ error = &ErrDecrypt{}

func (e *ErrDecrypt) Error() string {
return e.Err.Error()
}

func (e *ErrDecrypt) Is(target error) bool {
_, ok := target.(*ErrDecrypt)
return ok || errors.Is(e.Err, target)
}

func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *seal.Access, keys [][]byte) error {
if keys == nil {
return fmt.Errorf("keys were nil")

@@ -417,7 +448,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *s
// Encrypt and marshal the keys
blobInfo, err := encryptor.Encrypt(ctx, buf, nil)
if err != nil {
return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)
return &ErrEncrypt{Err: errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)}
}

value, err := proto.Marshal(blobInfo)

@@ -457,7 +488,7 @@ func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor *se
pt, err := encryptor.Decrypt(ctx, blobInfo, nil)
if err != nil {
return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)
return nil, &ErrDecrypt{Err: errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)}
}

// Decode the barrier entry
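The point of the Is methods above is that callers can detect seal encryption or decryption failures with errors.Is even after further wrapping. A self-contained sketch; the type is re-declared locally (matching the shape in the hunk above) so the example runs on its own:

package main

import (
	"errors"
	"fmt"
)

// ErrDecrypt mirrors the error type added above: it wraps an underlying error and
// matches any *ErrDecrypt target via Is, so callers can use errors.Is to detect
// seal decryption failures regardless of how the error was wrapped.
type ErrDecrypt struct{ Err error }

func (e *ErrDecrypt) Error() string { return e.Err.Error() }

func (e *ErrDecrypt) Is(target error) bool {
	_, ok := target.(*ErrDecrypt)
	return ok || errors.Is(e.Err, target)
}

func main() {
	err := fmt.Errorf("reading stored keys: %w",
		&ErrDecrypt{Err: errors.New("cipher: message authentication failed")})
	fmt.Println(errors.Is(err, &ErrDecrypt{})) // prints: true
}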
@@ -49,6 +49,7 @@ func (a *Access) Type() string {
return a.Wrapper.Type()
}

// Encrypt uses the underlying seal to encrypt the plaintext and returns it.
func (a *Access) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) {
defer func(now time.Time) {
metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now)

@@ -66,6 +67,9 @@ func (a *Access) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrap
return a.Wrapper.Encrypt(ctx, plaintext, aad)
}

// Decrypt uses the underlying seal to decrypt the cryptotext and returns it.
// Note that it is possible depending on the wrapper used that both pt and err
// are populated.
func (a *Access) Decrypt(ctx context.Context, data *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) {
defer func(now time.Time) {
metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now)
@@ -295,10 +295,6 @@ func TestCoreUnseal(core *Core, key []byte) (bool, error) {
return core.Unseal(key)
}

func TestCoreUnsealWithRecoveryKeys(core *Core, key []byte) (bool, error) {
return core.UnsealWithRecoveryKeys(key)
}

// TestCoreUnsealed returns a pure in-memory core that is already
// initialized and unsealed.
func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {

@@ -830,6 +826,7 @@ func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
}

func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
t.Helper()
var keys [][]byte
if core.seal.RecoveryKeySupported() {
keys = c.RecoveryKeys

@@ -844,6 +841,7 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
}

func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
t.Helper()
if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
t.Fatal(err)
}

@@ -1018,12 +1016,13 @@ type TestClusterOptions struct {
// do not clash with any other explicitly assigned ports in other tests.
BaseClusterListenPort int

NumCores int
SealFunc func() Seal
Logger log.Logger
TempDir string
CACert []byte
CAKey *ecdsa.PrivateKey
NumCores int
SealFunc func() Seal
UnwrapSealFunc func() Seal
Logger log.Logger
TempDir string
CACert []byte
CAKey *ecdsa.PrivateKey
// PhysicalFactory is used to create backends.
// The int argument is the index of the core within the cluster, i.e. first
// core in cluster will have 0, second 1, etc.

@@ -1702,6 +1701,9 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo
if opts != nil && opts.SealFunc != nil {
localConfig.Seal = opts.SealFunc()
}
if opts != nil && opts.UnwrapSealFunc != nil {
localConfig.UnwrapSeal = opts.UnwrapSealFunc()
}

if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) {
localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", idx))