// External tests for the sys/rekey verification API
// (vault/external_tests/api/sys_rekey_ext_test.go).
package api

import (
	"encoding/base64"
	"fmt"
	"testing"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/testhelpers"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/physical/inmem"
	"github.com/hashicorp/vault/vault"
	"github.com/hashicorp/vault/vault/seal"
)
func TestSysRekey_Verification(t *testing.T) {
testcases := []struct {
recovery bool
legacyShamir bool
}{
{recovery: true, legacyShamir: false},
{recovery: false, legacyShamir: false},
{recovery: false, legacyShamir: true},
}
for _, tc := range testcases {
recovery, legacy := tc.recovery, tc.legacyShamir
t.Run(fmt.Sprintf("recovery=%v,legacyShamir=%v", recovery, legacy), func(t *testing.T) {
t.Parallel()
testSysRekey_Verification(t, recovery, legacy)
})
}
2018-05-20 21:38:04 +00:00
}
func testSysRekey_Verification(t *testing.T, recovery bool, legacyShamir bool) {
opts := &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
}
switch {
case recovery:
if legacyShamir {
panic("invalid case")
}
opts.SealFunc = func() vault.Seal {
return vault.NewTestSeal(t, &seal.TestSealOpts{
StoredKeys: seal.StoredKeysSupportedGeneric,
})
}
case legacyShamir:
opts.SealFunc = func() vault.Seal {
return vault.NewTestSeal(t, &seal.TestSealOpts{
StoredKeys: seal.StoredKeysNotSupported,
})
}
}
inm, err := inmem.NewInmemHA(nil, logging.NewVaultLogger(hclog.Debug))
if err != nil {
t.Fatal(err)
}
conf := vault.CoreConfig{
Physical: inm,
}
cluster := vault.NewTestCluster(t, &conf, opts)
cluster.Start()
defer cluster.Cleanup()
vault.TestWaitActive(t, cluster.Cores[0].Core)
client := cluster.Cores[0].Client
client.SetMaxRetries(0)
2018-05-20 21:38:04 +00:00
initFunc := client.Sys().RekeyInit
updateFunc := client.Sys().RekeyUpdate
verificationUpdateFunc := client.Sys().RekeyVerificationUpdate
verificationStatusFunc := client.Sys().RekeyVerificationStatus
verificationCancelFunc := client.Sys().RekeyVerificationCancel
if recovery {
initFunc = client.Sys().RekeyRecoveryKeyInit
updateFunc = client.Sys().RekeyRecoveryKeyUpdate
verificationUpdateFunc = client.Sys().RekeyRecoveryKeyVerificationUpdate
verificationStatusFunc = client.Sys().RekeyRecoveryKeyVerificationStatus
verificationCancelFunc = client.Sys().RekeyRecoveryKeyVerificationCancel
}
2018-05-20 06:42:15 +00:00
var verificationNonce string
var newKeys []string
doRekeyInitialSteps := func() {
2018-05-20 21:38:04 +00:00
status, err := initFunc(&api.RekeyInitRequest{
2018-05-20 06:42:15 +00:00
SecretShares: 5,
SecretThreshold: 3,
RequireVerification: true,
})
if err != nil {
t.Fatal(err)
}
if status == nil {
t.Fatal("nil status")
}
if !status.VerificationRequired {
t.Fatal("expected verification required")
}
keys := cluster.BarrierKeys
if recovery {
keys = cluster.RecoveryKeys
}
2018-05-20 06:42:15 +00:00
var resp *api.RekeyUpdateResponse
for i := 0; i < 3; i++ {
resp, err = updateFunc(base64.StdEncoding.EncodeToString(keys[i]), status.Nonce)
2018-05-20 06:42:15 +00:00
if err != nil {
t.Fatal(err)
}
}
switch {
case !resp.Complete:
t.Fatal("expected completion")
case !resp.VerificationRequired:
t.Fatal("expected verification required")
case resp.VerificationNonce == "":
t.Fatal("verification nonce expected")
}
verificationNonce = resp.VerificationNonce
newKeys = resp.KeysB64
t.Logf("verification nonce: %q", verificationNonce)
}
doRekeyInitialSteps()
// We are still going, so should not be able to init again
_, err = initFunc(&api.RekeyInitRequest{
SecretShares: 5,
SecretThreshold: 3,
RequireVerification: true,
})
2018-05-20 06:42:15 +00:00
if err == nil {
t.Fatal("expected error")
}
// Sealing should clear state, so after this we should be able to perform
// the above again
cluster.EnsureCoresSealed(t)
if err := cluster.UnsealCoresWithError(recovery); err != nil {
t.Fatal(err)
}
2018-05-20 06:42:15 +00:00
doRekeyInitialSteps()
doStartVerify := func() {
// Start the process
for i := 0; i < 2; i++ {
2018-05-20 21:38:04 +00:00
status, err := verificationUpdateFunc(newKeys[i], verificationNonce)
2018-05-20 06:42:15 +00:00
if err != nil {
t.Fatal(err)
}
switch {
case status.Nonce != verificationNonce:
t.Fatalf("unexpected nonce, expected %q, got %q", verificationNonce, status.Nonce)
case status.Complete:
t.Fatal("unexpected completion")
}
}
// Check status
2018-05-20 21:38:04 +00:00
vStatus, err := verificationStatusFunc()
2018-05-20 06:42:15 +00:00
if err != nil {
t.Fatal(err)
}
switch {
case vStatus.Nonce != verificationNonce:
t.Fatalf("unexpected nonce, expected %q, got %q", verificationNonce, vStatus.Nonce)
case vStatus.T != 3:
t.Fatal("unexpected threshold")
case vStatus.N != 5:
t.Fatal("unexpected number of new keys")
case vStatus.Progress != 2:
t.Fatal("unexpected progress")
}
}
doStartVerify()
// Cancel; this should still keep the rekey process going but just cancel
// the verification operation
2018-05-20 21:38:04 +00:00
err = verificationCancelFunc()
if err != nil {
t.Fatal(err)
}
2018-05-20 06:42:15 +00:00
// Verify cannot init again
2018-05-20 21:38:04 +00:00
_, err = initFunc(&api.RekeyInitRequest{
2018-05-20 06:42:15 +00:00
SecretShares: 5,
SecretThreshold: 3,
RequireVerification: true,
})
if err == nil {
t.Fatal("expected error")
}
2018-05-20 21:38:04 +00:00
vStatus, err := verificationStatusFunc()
2018-05-20 06:42:15 +00:00
if err != nil {
t.Fatal(err)
}
switch {
case vStatus.Nonce == verificationNonce:
t.Fatalf("unexpected nonce, expected not-%q but got it", verificationNonce)
case vStatus.T != 3:
t.Fatal("unexpected threshold")
case vStatus.N != 5:
t.Fatal("unexpected number of new keys")
case vStatus.Progress != 0:
t.Fatal("unexpected progress")
}
verificationNonce = vStatus.Nonce
doStartVerify()
2018-05-20 22:42:14 +00:00
if !recovery {
// Sealing should clear state, but we never actually finished, so it should
// still be the old keys (which are still currently set)
cluster.EnsureCoresSealed(t)
cluster.UnsealCores(t)
vault.TestWaitActive(t, cluster.Cores[0].Core)
2018-05-20 22:42:14 +00:00
// Should be able to init again and get back to where we were
doRekeyInitialSteps()
doStartVerify()
2018-05-20 23:01:24 +00:00
} else {
// We haven't finished, so generating a root token should still be the
// old keys (which are still currently set)
Recovery Mode (#7559) * Initial work * rework * s/dr/recovery * Add sys/raw support to recovery mode (#7577) * Factor the raw paths out so they can be run with a SystemBackend. # Conflicts: # vault/logical_system.go * Add handleLogicalRecovery which is like handleLogical but is only sufficient for use with the sys-raw endpoint in recovery mode. No authentication is done yet. * Integrate with recovery-mode. We now handle unauthenticated sys/raw requests, albeit on path v1/raw instead v1/sys/raw. * Use sys/raw instead raw during recovery. * Don't bother persisting the recovery token. Authenticate sys/raw requests with it. * RecoveryMode: Support generate-root for autounseals (#7591) * Recovery: Abstract config creation and log settings * Recovery mode integration test. (#7600) * Recovery: Touch up (#7607) * Recovery: Touch up * revert the raw backend creation changes * Added recovery operation token prefix * Move RawBackend to its own file * Update API path and hit it using CLI flag on generate-root * Fix a panic triggered when handling a request that yields a nil response. (#7618) * Improve integ test to actually make changes while in recovery mode and verify they're still there after coming back in regular mode. * Refuse to allow a second recovery token to be generated. * Resize raft cluster to size 1 and start as leader (#7626) * RecoveryMode: Setup raft cluster post unseal (#7635) * Setup raft cluster post unseal in recovery mode * Remove marking as unsealed as its not needed * Address review comments * Accept only one seal config in recovery mode as there is no scope for migration
2019-10-15 04:55:31 +00:00
testhelpers.GenerateRoot(t, cluster, testhelpers.GenerateRootRegular)
2018-05-20 22:42:14 +00:00
}
2018-05-20 06:42:15 +00:00
// Provide the final new key
2018-05-20 21:38:04 +00:00
vuStatus, err := verificationUpdateFunc(newKeys[2], verificationNonce)
2018-05-20 06:42:15 +00:00
if err != nil {
t.Fatal(err)
}
switch {
case vuStatus.Nonce != verificationNonce:
t.Fatalf("unexpected nonce, expected %q, got %q", verificationNonce, vuStatus.Nonce)
case !vuStatus.Complete:
t.Fatal("expected completion")
}
2018-05-20 22:42:14 +00:00
if !recovery {
// Seal and unseal -- it should fail to unseal because the key has now been
// rotated
cluster.EnsureCoresSealed(t)
// Simulate restarting Vault rather than just a seal/unseal, because
// the standbys may not have had time to learn about the new key before
// we sealed them. We could sleep, but that's unreliable.
oldKeys := cluster.BarrierKeys
opts.SkipInit = true
opts.SealFunc = nil // post rekey we should use the barrier config on disk
cluster = vault.NewTestCluster(t, &conf, opts)
cluster.BarrierKeys = oldKeys
cluster.Start()
defer cluster.Cleanup()
if err := cluster.UnsealCoresWithError(false); err == nil {
2018-05-20 22:42:14 +00:00
t.Fatal("expected error")
}
2018-05-20 06:42:15 +00:00
2018-05-20 22:42:14 +00:00
// Swap out the keys with our new ones and try again
var newKeyBytes [][]byte
for _, key := range newKeys {
val, err := base64.StdEncoding.DecodeString(key)
if err != nil {
t.Fatal(err)
}
newKeyBytes = append(newKeyBytes, val)
}
cluster.BarrierKeys = newKeyBytes
if err := cluster.UnsealCoresWithError(false); err != nil {
t.Fatal(err)
2018-05-20 06:42:15 +00:00
}
2018-05-20 23:01:24 +00:00
} else {
// The old keys should no longer work
Recovery Mode (#7559) * Initial work * rework * s/dr/recovery * Add sys/raw support to recovery mode (#7577) * Factor the raw paths out so they can be run with a SystemBackend. # Conflicts: # vault/logical_system.go * Add handleLogicalRecovery which is like handleLogical but is only sufficient for use with the sys-raw endpoint in recovery mode. No authentication is done yet. * Integrate with recovery-mode. We now handle unauthenticated sys/raw requests, albeit on path v1/raw instead v1/sys/raw. * Use sys/raw instead raw during recovery. * Don't bother persisting the recovery token. Authenticate sys/raw requests with it. * RecoveryMode: Support generate-root for autounseals (#7591) * Recovery: Abstract config creation and log settings * Recovery mode integration test. (#7600) * Recovery: Touch up (#7607) * Recovery: Touch up * revert the raw backend creation changes * Added recovery operation token prefix * Move RawBackend to its own file * Update API path and hit it using CLI flag on generate-root * Fix a panic triggered when handling a request that yields a nil response. (#7618) * Improve integ test to actually make changes while in recovery mode and verify they're still there after coming back in regular mode. * Refuse to allow a second recovery token to be generated. * Resize raft cluster to size 1 and start as leader (#7626) * RecoveryMode: Setup raft cluster post unseal (#7635) * Setup raft cluster post unseal in recovery mode * Remove marking as unsealed as its not needed * Address review comments * Accept only one seal config in recovery mode as there is no scope for migration
2019-10-15 04:55:31 +00:00
_, err := testhelpers.GenerateRootWithError(t, cluster, testhelpers.GenerateRootRegular)
2018-05-20 23:01:24 +00:00
if err == nil {
t.Fatal("expected error")
}
// Put the new keys in place and run again
2018-05-20 23:01:24 +00:00
cluster.RecoveryKeys = nil
for _, key := range newKeys {
dec, err := base64.StdEncoding.DecodeString(key)
if err != nil {
t.Fatal(err)
}
cluster.RecoveryKeys = append(cluster.RecoveryKeys, dec)
}
if err := client.Sys().GenerateRootCancel(); err != nil {
t.Fatal(err)
}
Recovery Mode (#7559) * Initial work * rework * s/dr/recovery * Add sys/raw support to recovery mode (#7577) * Factor the raw paths out so they can be run with a SystemBackend. # Conflicts: # vault/logical_system.go * Add handleLogicalRecovery which is like handleLogical but is only sufficient for use with the sys-raw endpoint in recovery mode. No authentication is done yet. * Integrate with recovery-mode. We now handle unauthenticated sys/raw requests, albeit on path v1/raw instead v1/sys/raw. * Use sys/raw instead raw during recovery. * Don't bother persisting the recovery token. Authenticate sys/raw requests with it. * RecoveryMode: Support generate-root for autounseals (#7591) * Recovery: Abstract config creation and log settings * Recovery mode integration test. (#7600) * Recovery: Touch up (#7607) * Recovery: Touch up * revert the raw backend creation changes * Added recovery operation token prefix * Move RawBackend to its own file * Update API path and hit it using CLI flag on generate-root * Fix a panic triggered when handling a request that yields a nil response. (#7618) * Improve integ test to actually make changes while in recovery mode and verify they're still there after coming back in regular mode. * Refuse to allow a second recovery token to be generated. * Resize raft cluster to size 1 and start as leader (#7626) * RecoveryMode: Setup raft cluster post unseal (#7635) * Setup raft cluster post unseal in recovery mode * Remove marking as unsealed as its not needed * Address review comments * Accept only one seal config in recovery mode as there is no scope for migration
2019-10-15 04:55:31 +00:00
testhelpers.GenerateRoot(t, cluster, testhelpers.GenerateRootRegular)
}
}