package rafttests

import (
	"bytes"
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/go-cleanhttp"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/api"
	credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
	"github.com/hashicorp/vault/helper/constants"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/helper/testhelpers"
	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/vault"
	vaultseal "github.com/hashicorp/vault/vault/seal"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/http2"
)
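
// RaftClusterOpts controls how raftCluster builds a raft-backed test cluster.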
type RaftClusterOpts struct {
	DisableFollowerJoins           bool
	InmemCluster                   bool
	EnableAutopilot                bool
	PhysicalFactoryConfig          map[string]interface{}
	DisablePerfStandby             bool
	EnableResponseHeaderRaftNodeID bool
	NumCores                       int
	Seal                           vault.Seal
}
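
// raftCluster builds a raft-backed test cluster from the given options,
// starts it, and waits for core-0 to become the active node.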
func raftCluster(t testing.TB, ropts *RaftClusterOpts) *vault.TestCluster {
	if ropts == nil {
		ropts = &RaftClusterOpts{}
	}

	conf := &vault.CoreConfig{
		CredentialBackends: map[string]logical.Factory{
			"userpass": credUserpass.Factory,
		},
		DisableAutopilot:               !ropts.EnableAutopilot,
		EnableResponseHeaderRaftNodeID: ropts.EnableResponseHeaderRaftNodeID,
		Seal:                           ropts.Seal,
	}

	opts := vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
	}
	opts.InmemClusterLayers = ropts.InmemCluster
	opts.PhysicalFactoryConfig = ropts.PhysicalFactoryConfig
	conf.DisablePerformanceStandby = ropts.DisablePerfStandby
	opts.NumCores = ropts.NumCores

	teststorage.RaftBackendSetup(conf, &opts)

	if ropts.DisableFollowerJoins {
		opts.SetupFunc = nil
	}

	cluster := vault.NewTestCluster(t, conf, &opts)
	cluster.Start()
	vault.TestWaitActive(t, cluster.Cores[0].Core)
	return cluster
}
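
// TestRaft_BoltDBMetrics verifies that the raft backend emits boltdb gauges
// (raft_storage.bolt.*) through the metrics endpoint after some writes.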
func TestRaft_BoltDBMetrics(t *testing.T) {
	t.Parallel()
	conf := vault.CoreConfig{}
	opts := vault.TestClusterOptions{
		HandlerFunc:            vaulthttp.Handler,
		NumCores:               1,
		CoreMetricSinkProvider: testhelpers.TestMetricSinkProvider(time.Minute),
		DefaultHandlerProperties: vault.HandlerProperties{
			ListenerConfig: &configutil.Listener{
				Telemetry: configutil.ListenerTelemetry{
					UnauthenticatedMetricsAccess: true,
				},
			},
		},
	}

	teststorage.RaftBackendSetup(&conf, &opts)
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer cluster.Cleanup()

	vault.TestWaitActive(t, cluster.Cores[0].Core)
	leaderClient := cluster.Cores[0].Client

	// Write a few keys
	for i := 0; i < 50; i++ {
		_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
			fmt.Sprintf("foo%d", i): fmt.Sprintf("bar%d", i),
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	// Even though there is a long delay between starting the node and checking for
	// these metrics, the core metrics loop isn't started until postUnseal, which
	// happens after that delay. So we still need a small artificial delay here,
	// otherwise we won't see any metrics emitted.
	time.Sleep(5 * time.Second)

	data, err := testhelpers.SysMetricsReq(leaderClient, cluster, true)
	if err != nil {
		t.Fatal(err)
	}

	noBoltDBMetrics := true
	for _, g := range data.Gauges {
		if strings.HasPrefix(g.Name, "raft_storage.bolt.") {
			noBoltDBMetrics = false
			break
		}
	}

	if noBoltDBMetrics {
		t.Fatal("expected to find boltdb metrics being emitted from the raft backend, but there were none")
	}
}
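
// TestRaft_RetryAutoJoin exercises retry_join with an auto-join configuration;
// the join call is expected to succeed without the follower actually joining,
// since no leader addresses are discovered.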
func TestRaft_RetryAutoJoin(t *testing.T) {
	t.Parallel()

	var (
		conf vault.CoreConfig
		opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
	)

	teststorage.RaftBackendSetup(&conf, &opts)
	opts.SetupFunc = nil
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer cluster.Cleanup()

	addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster}
	leaderCore := cluster.Cores[0]
	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)

	{
		testhelpers.EnsureCoreSealed(t, leaderCore)
		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leaderCore)
		vault.TestWaitActive(t, leaderCore.Core)
	}

	leaderInfos := []*raft.LeaderJoinInfo{
		{
			AutoJoin:  "provider=aws region=eu-west-1 tag_key=consul tag_value=tag access_key_id=a secret_access_key=a",
			TLSConfig: leaderCore.TLSConfig,
			Retry:     true,
		},
	}

	{
		// expected to pass but not join as we're not actually discovering leader addresses
		core := cluster.Cores[1]
		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)

		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		require.NoError(t, err)
	}

	testhelpers.VerifyRaftPeers(t, cluster.Cores[0].Client, map[string]bool{
		"core-0": true,
	})
}
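
// TestRaft_Retry_Join joins two followers to the leader using retry join
// (LeaderJoinInfo with Retry set) and verifies that all three nodes end up
// as raft peers.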
func TestRaft_Retry_Join(t *testing.T) {
	t.Parallel()
	var conf vault.CoreConfig
	opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
	teststorage.RaftBackendSetup(&conf, &opts)
	opts.SetupFunc = nil
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer cluster.Cleanup()

	addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster}

	leaderCore := cluster.Cores[0]
	leaderAPI := leaderCore.Client.Address()
	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)

	{
		testhelpers.EnsureCoreSealed(t, leaderCore)
		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leaderCore)
		vault.TestWaitActive(t, leaderCore.Core)
	}

	leaderInfos := []*raft.LeaderJoinInfo{
		{
			LeaderAPIAddr: leaderAPI,
			TLSConfig:     leaderCore.TLSConfig,
			Retry:         true,
		},
	}

	{
		core := cluster.Cores[1]
		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		if err != nil {
			t.Fatal(err)
		}

		time.Sleep(2 * time.Second)

		cluster.UnsealCore(t, core)
	}

	{
		core := cluster.Cores[2]
		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
		if err != nil {
			t.Fatal(err)
		}

		time.Sleep(2 * time.Second)

		cluster.UnsealCore(t, core)
	}

	testhelpers.VerifyRaftPeers(t, cluster.Cores[0].Client, map[string]bool{
		"core-0": true,
		"core-1": true,
		"core-2": true,
	})
}
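
// TestRaft_Join joins followers to the leader through the raft join API,
// removes them again, and then re-joins them with client certificates supplied.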
func TestRaft_Join(t *testing.T) {
	t.Parallel()
	var conf vault.CoreConfig
	opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
	teststorage.RaftBackendSetup(&conf, &opts)
	opts.SetupFunc = nil
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer cluster.Cleanup()

	addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster}

	leaderCore := cluster.Cores[0]
	leaderAPI := leaderCore.Client.Address()
	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)

	// Seal the leader so we can install an address provider
	{
		testhelpers.EnsureCoreSealed(t, leaderCore)
		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leaderCore)
		vault.TestWaitActive(t, leaderCore.Core)
	}

	joinFunc := func(client *api.Client, addClientCerts bool) {
		req := &api.RaftJoinRequest{
			LeaderAPIAddr: leaderAPI,
			LeaderCACert:  string(cluster.CACertPEM),
		}
		if addClientCerts {
			req.LeaderClientCert = string(cluster.CACertPEM)
			req.LeaderClientKey = string(cluster.CAKeyPEM)
		}
		resp, err := client.Sys().RaftJoin(req)
		if err != nil {
			t.Fatal(err)
		}
		if !resp.Joined {
			t.Fatalf("failed to join raft cluster")
		}
	}

	joinFunc(cluster.Cores[1].Client, false)
	joinFunc(cluster.Cores[2].Client, false)

	_, err := cluster.Cores[0].Client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{
		"server_id": "core-1",
	})
	if err != nil {
		t.Fatal(err)
	}

	_, err = cluster.Cores[0].Client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{
		"server_id": "core-2",
	})
	if err != nil {
		t.Fatal(err)
	}

	joinFunc(cluster.Cores[1].Client, true)
	joinFunc(cluster.Cores[2].Client, true)
}
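
// TestRaft_RemovePeer removes peers from a three-node cluster one at a time
// and verifies the raft peer set after each removal.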
func TestRaft_RemovePeer(t *testing.T) {
	t.Parallel()
	cluster := raftCluster(t, nil)
	defer cluster.Cleanup()

	for i, c := range cluster.Cores {
		if c.Core.Sealed() {
			t.Fatalf("failed to unseal core %d", i)
		}
	}

	client := cluster.Cores[0].Client

	testhelpers.VerifyRaftPeers(t, client, map[string]bool{
		"core-0": true,
		"core-1": true,
		"core-2": true,
	})

	_, err := client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{
		"server_id": "core-2",
	})
	if err != nil {
		t.Fatal(err)
	}

	testhelpers.VerifyRaftPeers(t, client, map[string]bool{
		"core-0": true,
		"core-1": true,
	})

	_, err = client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{
		"server_id": "core-1",
	})
	if err != nil {
		t.Fatal(err)
	}

	testhelpers.VerifyRaftPeers(t, client, map[string]bool{
		"core-0": true,
	})
}
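
// TestRaft_NodeIDHeader checks that the X-Vault-Raft-Node-ID response header is
// returned only when EnableResponseHeaderRaftNodeID is set, and that its value
// matches the core's raft node ID.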
func TestRaft_NodeIDHeader(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		description   string
		ropts         *RaftClusterOpts
		headerPresent bool
	}{
		{
			description:   "with no header configured",
			ropts:         nil,
			headerPresent: false,
		},
		{
			description: "with header configured",
			ropts: &RaftClusterOpts{
				EnableResponseHeaderRaftNodeID: true,
			},
			headerPresent: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			cluster := raftCluster(t, tc.ropts)
			defer cluster.Cleanup()

			for i, c := range cluster.Cores {
				if c.Core.Sealed() {
					t.Fatalf("failed to unseal core %d", i)
				}

				client := c.Client
				req := client.NewRequest("GET", "/v1/sys/seal-status")
				resp, err := client.RawRequest(req)
				if err != nil {
					t.Fatalf("err: %s", err)
				}
				if resp == nil {
					t.Fatalf("nil response")
				}

				rniHeader := resp.Header.Get("X-Vault-Raft-Node-ID")
				nodeID := c.Core.GetRaftNodeID()

				if tc.headerPresent && rniHeader == "" {
					t.Fatal("missing 'X-Vault-Raft-Node-ID' header entry in response")
				}
				if tc.headerPresent && rniHeader != nodeID {
					t.Fatalf("got the wrong raft node id. expected %s to equal %s", rniHeader, nodeID)
				}
				if !tc.headerPresent && rniHeader != "" {
					t.Fatal("didn't expect 'X-Vault-Raft-Node-ID' header but it was present anyway")
				}
			}
		})
	}
}
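
// TestRaft_Configuration reads sys/storage/raft/configuration and verifies that
// all three nodes are present and that core-0 is reported as the leader.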
func TestRaft_Configuration(t *testing.T) {
	t.Parallel()
	cluster := raftCluster(t, nil)
	defer cluster.Cleanup()

	for i, c := range cluster.Cores {
		if c.Core.Sealed() {
			t.Fatalf("failed to unseal core %d", i)
		}
	}

	client := cluster.Cores[0].Client
	secret, err := client.Logical().Read("sys/storage/raft/configuration")
	if err != nil {
		t.Fatal(err)
	}
	servers := secret.Data["config"].(map[string]interface{})["servers"].([]interface{})
	expected := map[string]bool{
		"core-0": true,
		"core-1": true,
		"core-2": true,
	}
	if len(servers) != 3 {
		t.Fatalf("incorrect number of servers in the configuration")
	}
	for _, s := range servers {
		server := s.(map[string]interface{})
		nodeID := server["node_id"].(string)
		leader := server["leader"].(bool)
		switch nodeID {
		case "core-0":
			if !leader {
				t.Fatalf("expected server to be leader: %#v", server)
			}
		default:
			if leader {
				t.Fatalf("expected server to not be leader: %#v", server)
			}
		}

		delete(expected, nodeID)
	}
	if len(expected) != 0 {
		t.Fatalf("failed to read configuration successfully")
	}
}
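
// TestRaft_ShamirUnseal verifies that every core of a raft cluster comes up
// unsealed when using Shamir unseal keys.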
func TestRaft_ShamirUnseal(t *testing.T) {
	t.Parallel()
	cluster := raftCluster(t, nil)
	defer cluster.Cleanup()

	for i, c := range cluster.Cores {
		if c.Core.Sealed() {
			t.Fatalf("failed to unseal core %d", i)
		}
	}
}
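
// TestRaft_SnapshotAPI takes a snapshot, writes more keys, restores the
// snapshot, and verifies that the extra keys are gone.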
func TestRaft_SnapshotAPI(t *testing.T) {
	t.Parallel()
	cluster := raftCluster(t, nil)
	defer cluster.Cleanup()

	leaderClient := cluster.Cores[0].Client

	// Write a few keys
	for i := 0; i < 10; i++ {
		_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
			"test": "data",
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	transport := cleanhttp.DefaultPooledTransport()
	transport.TLSClientConfig = cluster.Cores[0].TLSConfig.Clone()
	if err := http2.ConfigureTransport(transport); err != nil {
		t.Fatal(err)
	}
	client := &http.Client{
		Transport: transport,
	}

	// Take a snapshot
	req := leaderClient.NewRequest("GET", "/v1/sys/storage/raft/snapshot")
	httpReq, err := req.ToHTTP()
	if err != nil {
		t.Fatal(err)
	}
	resp, err := client.Do(httpReq)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	snap, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	if len(snap) == 0 {
		t.Fatal("no snapshot returned")
	}

	// Write a few more keys
	for i := 10; i < 20; i++ {
		_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
			"test": "data",
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	// Restore snapshot
	req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot")
	req.Body = bytes.NewBuffer(snap)
	httpReq, err = req.ToHTTP()
	if err != nil {
		t.Fatal(err)
	}
	resp, err = client.Do(httpReq)
	if err != nil {
		t.Fatal(err)
	}

	// List kv to make sure we removed the extra keys
	secret, err := leaderClient.Logical().List("secret/")
	if err != nil {
		t.Fatal(err)
	}

	if len(secret.Data["keys"].([]interface{})) != 10 {
		t.Fatal("snapshot didn't apply correctly")
	}
}
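
// TestRaft_SnapshotAPI_MidstreamFailure induces a seal failure partway through
// streaming a snapshot and expects the client to surface api.ErrIncompleteSnapshot.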
func TestRaft_SnapshotAPI_MidstreamFailure(t *testing.T) {
	// defer goleak.VerifyNone(t)
	t.Parallel()

	seal, errptr := vaultseal.NewToggleableTestSeal(nil)
	cluster := raftCluster(t, &RaftClusterOpts{
		NumCores: 1,
		Seal:     vault.NewAutoSeal(seal),
	})
	defer cluster.Cleanup()

	leaderClient := cluster.Cores[0].Client

	// Write a bunch of keys; if there are too few, the detection code in
	// api.RaftSnapshot never makes it to the tar portion of the stream and
	// fails merely when trying to decompress it.
	for i := 0; i < 1000; i++ {
		_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
			"test": "data",
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	r, w := io.Pipe()
	var snap []byte
	var wg sync.WaitGroup
	wg.Add(1)
	var readErr error
	go func() {
		snap, readErr = ioutil.ReadAll(r)
		wg.Done()
	}()

	*errptr = errors.New("seal failure")
	// Take a snapshot
	err := leaderClient.Sys().RaftSnapshot(w)
	w.Close()
	if err == nil || err != api.ErrIncompleteSnapshot {
		t.Fatalf("expected err=%v, got: %v", api.ErrIncompleteSnapshot, err)
	}
	wg.Wait()
	if len(snap) == 0 && readErr == nil {
		readErr = errors.New("no bytes read")
	}
	if readErr != nil {
		t.Fatal(readErr)
	}
}
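
// TestRaft_SnapshotAPI_RekeyRotate_Backward restores an older snapshot after a
// rekey and/or keyring rotation and verifies that the cluster recovers once the
// original barrier keys are used again.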
func TestRaft_SnapshotAPI_RekeyRotate_Backward(t *testing.T) {
	type testCase struct {
		Name               string
		Rekey              bool
		Rotate             bool
		DisablePerfStandby bool
	}

	tCases := []testCase{
		{
			Name:               "rekey",
			Rekey:              true,
			Rotate:             false,
			DisablePerfStandby: true,
		},
		{
			Name:               "rotate",
			Rekey:              false,
			Rotate:             true,
			DisablePerfStandby: true,
		},
		{
			Name:               "both",
			Rekey:              true,
			Rotate:             true,
			DisablePerfStandby: true,
		},
	}

	if constants.IsEnterprise {
		tCases = append(tCases, []testCase{
			{
				Name:               "rekey-with-perf-standby",
				Rekey:              true,
				Rotate:             false,
				DisablePerfStandby: false,
			},
			{
				Name:               "rotate-with-perf-standby",
				Rekey:              false,
				Rotate:             true,
				DisablePerfStandby: false,
			},
			{
				Name:               "both-with-perf-standby",
				Rekey:              true,
				Rotate:             true,
				DisablePerfStandby: false,
			},
		}...)
	}

	for _, tCase := range tCases {
		t.Run(tCase.Name, func(t *testing.T) {
			// bind locally
			tCaseLocal := tCase
			t.Parallel()

			cluster := raftCluster(t, &RaftClusterOpts{DisablePerfStandby: tCaseLocal.DisablePerfStandby})
			defer cluster.Cleanup()

			leaderClient := cluster.Cores[0].Client

			// Write a few keys
			for i := 0; i < 10; i++ {
				_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
					"test": "data",
				})
				if err != nil {
					t.Fatal(err)
				}
			}

			transport := cleanhttp.DefaultPooledTransport()
			transport.TLSClientConfig = cluster.Cores[0].TLSConfig.Clone()
			if err := http2.ConfigureTransport(transport); err != nil {
				t.Fatal(err)
			}
			client := &http.Client{
				Transport: transport,
			}

			// Take a snapshot
			req := leaderClient.NewRequest("GET", "/v1/sys/storage/raft/snapshot")
			httpReq, err := req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err := client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()

			snap, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				t.Fatal(err)
			}
			if len(snap) == 0 {
				t.Fatal("no snapshot returned")
			}

			// cache the original barrier keys
			barrierKeys := cluster.BarrierKeys

			if tCaseLocal.Rotate {
				// Rotate
				err = leaderClient.Sys().Rotate()
				if err != nil {
					t.Fatal(err)
				}

				testhelpers.EnsureStableActiveNode(t, cluster)
				testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
			}

			if tCaseLocal.Rekey {
				// Rekey
				cluster.BarrierKeys = testhelpers.RekeyCluster(t, cluster, false)

				testhelpers.EnsureStableActiveNode(t, cluster)
				testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
			}

			if tCaseLocal.Rekey {
				// Restore snapshot, should fail.
				req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot")
				req.Body = bytes.NewBuffer(snap)
				httpReq, err = req.ToHTTP()
				if err != nil {
					t.Fatal(err)
				}
				resp, err = client.Do(httpReq)
				if err != nil {
					t.Fatal(err)
				}
				// Parse Response
				apiResp := api.Response{Response: resp}
				if apiResp.Error() == nil || !strings.Contains(apiResp.Error().Error(), "could not verify hash file, possibly the snapshot is using a different set of unseal keys") {
					t.Fatal(apiResp.Error())
				}
			}

			// Restore snapshot force
			req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot-force")
			req.Body = bytes.NewBuffer(snap)
			httpReq, err = req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err = client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}

			testhelpers.EnsureStableActiveNode(t, cluster)
			testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

			// Write some data so we can make sure we can read it later. This is testing
			// that we correctly reload the keyring.
			_, err = leaderClient.Logical().Write("secret/foo", map[string]interface{}{
				"test": "data",
			})
			if err != nil {
				t.Fatal(err)
			}

			testhelpers.EnsureCoresSealed(t, cluster)

			cluster.BarrierKeys = barrierKeys
			testhelpers.EnsureCoresUnsealed(t, cluster)
			testhelpers.WaitForActiveNode(t, cluster)
			activeCore := testhelpers.DeriveStableActiveCore(t, cluster)

			// Read the value.
			data, err := activeCore.Client.Logical().Read("secret/foo")
			if err != nil {
				t.Fatal(err)
			}
			if data.Data["test"].(string) != "data" {
				t.Fatal(data)
			}
		})
	}
}
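
// TestRaft_SnapshotAPI_RekeyRotate_Forward restores a newer snapshot, taken after
// a rekey and/or keyring rotation, onto an older cluster state and verifies
// whether the cluster recovers or, when the keys can no longer be upgraded,
// seals itself.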
func TestRaft_SnapshotAPI_RekeyRotate_Forward(t *testing.T) {
	type testCase struct {
		Name               string
		Rekey              bool
		Rotate             bool
		ShouldSeal         bool
		DisablePerfStandby bool
	}

	tCases := []testCase{
		{
			Name:               "rekey",
			Rekey:              true,
			Rotate:             false,
			ShouldSeal:         false,
			DisablePerfStandby: true,
		},
		{
			Name:   "rotate",
			Rekey:  false,
			Rotate: true,
			// Rotate writes a new master key upgrade using the new term, which
			// we can no longer decrypt. We must seal here.
			ShouldSeal:         true,
			DisablePerfStandby: true,
		},
		{
			Name:   "both",
			Rekey:  true,
			Rotate: true,
			// If we are moving forward and have both rekeyed and rotated, there
			// isn't any way to restore the latest keys, so expect to seal.
			ShouldSeal:         true,
			DisablePerfStandby: true,
		},
	}

	if constants.IsEnterprise {
		tCases = append(tCases, []testCase{
			{
				Name:               "rekey-with-perf-standby",
				Rekey:              true,
				Rotate:             false,
				ShouldSeal:         false,
				DisablePerfStandby: false,
			},
			{
				Name:   "rotate-with-perf-standby",
				Rekey:  false,
				Rotate: true,
				// Rotate writes a new master key upgrade using the new term, which
				// we can no longer decrypt. We must seal here.
				ShouldSeal:         true,
				DisablePerfStandby: false,
			},
			{
				Name:   "both-with-perf-standby",
				Rekey:  true,
				Rotate: true,
				// If we are moving forward and have both rekeyed and rotated, there
				// isn't any way to restore the latest keys, so expect to seal.
				ShouldSeal:         true,
				DisablePerfStandby: false,
			},
		}...)
	}

	for _, tCase := range tCases {
		t.Run(tCase.Name, func(t *testing.T) {
			// bind locally
			tCaseLocal := tCase
			t.Parallel()

			cluster := raftCluster(t, &RaftClusterOpts{DisablePerfStandby: tCaseLocal.DisablePerfStandby})
			defer cluster.Cleanup()

			leaderClient := cluster.Cores[0].Client

			// Write a few keys
			for i := 0; i < 10; i++ {
				_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
					"test": "data",
				})
				if err != nil {
					t.Fatal(err)
				}
			}

			transport := cleanhttp.DefaultPooledTransport()
			transport.TLSClientConfig = cluster.Cores[0].TLSConfig.Clone()
			if err := http2.ConfigureTransport(transport); err != nil {
				t.Fatal(err)
			}
			client := &http.Client{
				Transport: transport,
			}

			// Take a snapshot
			req := leaderClient.NewRequest("GET", "/v1/sys/storage/raft/snapshot")
			httpReq, err := req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err := client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}

			snap, err := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if err != nil {
				t.Fatal(err)
			}
			if len(snap) == 0 {
				t.Fatal("no snapshot returned")
			}

			if tCaseLocal.Rekey {
				// Rekey
				cluster.BarrierKeys = testhelpers.RekeyCluster(t, cluster, false)

				testhelpers.EnsureStableActiveNode(t, cluster)
				testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
			}
			if tCaseLocal.Rotate {
				// Set the key rotation grace period to 0 so the old key is cleaned up
				// immediately. This simulates having no way to upgrade to the latest
				// term.
				for _, c := range cluster.Cores {
					c.Core.SetKeyRotateGracePeriod(0)
				}

				// Rotate
				err = leaderClient.Sys().Rotate()
				if err != nil {
					t.Fatal(err)
				}

				if !tCaseLocal.DisablePerfStandby {
					// Without the key upgrade the perf standby nodes will seal and
					// raft will get into a failure state. Make sure we get the
					// cluster back into a healthy state before moving forward.
					testhelpers.WaitForNCoresSealed(t, cluster, 2)
					testhelpers.EnsureCoresUnsealed(t, cluster)
					testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

					active := testhelpers.DeriveActiveCore(t, cluster)
					leaderClient = active.Client
				}
			}

			// cache the new barrier keys
			newBarrierKeys := cluster.BarrierKeys

			// Take another snapshot for later use in "jumping" forward
			req = leaderClient.NewRequest("GET", "/v1/sys/storage/raft/snapshot")
			httpReq, err = req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err = client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}

			snap2, err := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if err != nil {
				t.Fatal(err)
			}
			if len(snap2) == 0 {
				t.Fatal("no snapshot returned")
			}

			// Restore snapshot to move us back in time so we can test going
			// forward
			req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot-force")
			req.Body = bytes.NewBuffer(snap)
			httpReq, err = req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err = client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}

			testhelpers.EnsureStableActiveNode(t, cluster)
			testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
			if tCaseLocal.Rekey {
				// Restore snapshot, should fail.
				req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot")
				req.Body = bytes.NewBuffer(snap2)
				httpReq, err = req.ToHTTP()
				if err != nil {
					t.Fatal(err)
				}
				resp, err = client.Do(httpReq)
				if err != nil {
					t.Fatal(err)
				}
				// Parse Response
				apiResp := api.Response{Response: resp}
				if apiResp.Error() == nil || !strings.Contains(apiResp.Error().Error(), "could not verify hash file, possibly the snapshot is using a different set of unseal keys") {
					t.Fatalf("expected error verifying hash file, got %v", apiResp.Error())
				}
			}

			// Restore snapshot force
			req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot-force")
			req.Body = bytes.NewBuffer(snap2)
			httpReq, err = req.ToHTTP()
			if err != nil {
				t.Fatal(err)
			}
			resp, err = client.Do(httpReq)
			if err != nil {
				t.Fatal(err)
			}

			switch tCaseLocal.ShouldSeal {
			case true:
				testhelpers.WaitForNCoresSealed(t, cluster, 3)

			case false:
				testhelpers.EnsureStableActiveNode(t, cluster)
				testhelpers.WaitForActiveNodeAndStandbys(t, cluster)

				// Write some data so we can make sure we can read it later. This is
				// testing that we correctly reload the keyring.
				_, err = leaderClient.Logical().Write("secret/foo", map[string]interface{}{
					"test": "data",
				})
				if err != nil {
					t.Fatal(err)
				}

				testhelpers.EnsureCoresSealed(t, cluster)

				cluster.BarrierKeys = newBarrierKeys
				testhelpers.EnsureCoresUnsealed(t, cluster)
				testhelpers.WaitForActiveNode(t, cluster)
				activeCore := testhelpers.DeriveStableActiveCore(t, cluster)

				// Read the value.
				data, err := activeCore.Client.Logical().Read("secret/foo")
				if err != nil {
					t.Fatal(err)
				}
				if data.Data["test"].(string) != "data" {
					t.Fatal(data)
				}
			}
		})
	}
}
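
// TestRaft_SnapshotAPI_DifferentCluster restores a snapshot taken on one cluster
// into a second cluster: the normal restore is rejected, and a forced restore
// leaves the second cluster sealed.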
func TestRaft_SnapshotAPI_DifferentCluster(t *testing.T) {
	t.Parallel()
	cluster := raftCluster(t, nil)
	defer cluster.Cleanup()

	leaderClient := cluster.Cores[0].Client

	// Write a few keys
	for i := 0; i < 10; i++ {
		_, err := leaderClient.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{
			"test": "data",
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	transport := cleanhttp.DefaultPooledTransport()
	transport.TLSClientConfig = cluster.Cores[0].TLSConfig.Clone()
	if err := http2.ConfigureTransport(transport); err != nil {
		t.Fatal(err)
	}
	client := &http.Client{
		Transport: transport,
	}

	// Take a snapshot
	req := leaderClient.NewRequest("GET", "/v1/sys/storage/raft/snapshot")
	httpReq, err := req.ToHTTP()
	if err != nil {
		t.Fatal(err)
	}
	resp, err := client.Do(httpReq)
	if err != nil {
		t.Fatal(err)
	}

	snap, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		t.Fatal(err)
	}
	if len(snap) == 0 {
		t.Fatal("no snapshot returned")
	}

	// Cluster 2
	{
		cluster2 := raftCluster(t, nil)
		defer cluster2.Cleanup()

		leaderClient := cluster2.Cores[0].Client

		transport := cleanhttp.DefaultPooledTransport()
		transport.TLSClientConfig = cluster2.Cores[0].TLSConfig.Clone()
		if err := http2.ConfigureTransport(transport); err != nil {
			t.Fatal(err)
		}
		client := &http.Client{
			Transport: transport,
		}

		// Restore snapshot, should fail.
		req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot")
		req.Body = bytes.NewBuffer(snap)
		httpReq, err = req.ToHTTP()
		if err != nil {
			t.Fatal(err)
		}
		resp, err = client.Do(httpReq)
		if err != nil {
			t.Fatal(err)
		}
		// Parse Response
		apiResp := api.Response{Response: resp}
		if apiResp.Error() == nil || !strings.Contains(apiResp.Error().Error(), "could not verify hash file, possibly the snapshot is using a different set of unseal keys") {
			t.Fatal(apiResp.Error())
		}

		// Restore snapshot force
		req = leaderClient.NewRequest("POST", "/v1/sys/storage/raft/snapshot-force")
		req.Body = bytes.NewBuffer(snap)
		httpReq, err = req.ToHTTP()
		if err != nil {
			t.Fatal(err)
		}
		resp, err = client.Do(httpReq)
		if err != nil {
			t.Fatal(err)
		}

		testhelpers.WaitForNCoresSealed(t, cluster2, 3)
	}
}
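
// BenchmarkRaft_SingleNode measures raft write performance by writing random
// payloads of a fixed size through the leader client.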
func BenchmarkRaft_SingleNode(b *testing.B) {
	cluster := raftCluster(b, nil)
	defer cluster.Cleanup()

	leaderClient := cluster.Cores[0].Client

	bench := func(b *testing.B, dataSize int) {
		data, err := uuid.GenerateRandomBytes(dataSize)
		if err != nil {
			b.Fatal(err)
		}

		testName := b.Name()

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			key := fmt.Sprintf("secret/%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i))))
			_, err := leaderClient.Logical().Write(key, map[string]interface{}{
				"test": data,
			})
			if err != nil {
				b.Fatal(err)
			}
		}
	}
	b.Run("256b", func(b *testing.B) { bench(b, 256) })
}
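
// TestRaft_Join_InitStatus verifies that sys/init, sys/seal-status, and
// sys/health all report the correct initialization state as nodes join and
// unseal.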
func TestRaft_Join_InitStatus(t *testing.T) {
	t.Parallel()
	var conf vault.CoreConfig
	opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
	teststorage.RaftBackendSetup(&conf, &opts)
	opts.SetupFunc = nil
	cluster := vault.NewTestCluster(t, &conf, &opts)
	cluster.Start()
	defer cluster.Cleanup()

	addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster}

	leaderCore := cluster.Cores[0]
	leaderAPI := leaderCore.Client.Address()
	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)

	// Seal the leader so we can install an address provider
	{
		testhelpers.EnsureCoreSealed(t, leaderCore)
		leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
		cluster.UnsealCore(t, leaderCore)
		vault.TestWaitActive(t, leaderCore.Core)
	}

	joinFunc := func(client *api.Client) {
		req := &api.RaftJoinRequest{
			LeaderAPIAddr: leaderAPI,
			LeaderCACert:  string(cluster.CACertPEM),
		}
		resp, err := client.Sys().RaftJoin(req)
		if err != nil {
			t.Fatal(err)
		}
		if !resp.Joined {
			t.Fatalf("failed to join raft cluster")
		}
	}

	verifyInitStatus := func(coreIdx int, expected bool) {
		t.Helper()
		client := cluster.Cores[coreIdx].Client

		initialized, err := client.Sys().InitStatus()
		if err != nil {
			t.Fatal(err)
		}

		if initialized != expected {
			t.Errorf("core %d: expected init=%v, sys/init returned %v", coreIdx, expected, initialized)
		}

		status, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}

		if status.Initialized != expected {
			t.Errorf("core %d: expected init=%v, sys/seal-status returned %v", coreIdx, expected, status.Initialized)
		}

		health, err := client.Sys().Health()
		if err != nil {
			t.Fatal(err)
		}
		if health.Initialized != expected {
			t.Errorf("core %d: expected init=%v, sys/health returned %v", coreIdx, expected, health.Initialized)
		}
	}

	for i := range cluster.Cores {
		verifyInitStatus(i, i < 1)
	}

	joinFunc(cluster.Cores[1].Client)
	for i, core := range cluster.Cores {
		verifyInitStatus(i, i < 2)
		if i == 1 {
			cluster.UnsealCore(t, core)
			verifyInitStatus(i, true)
		}
	}

	joinFunc(cluster.Cores[2].Client)
	for i, core := range cluster.Cores {
		verifyInitStatus(i, true)
		if i == 2 {
			cluster.UnsealCore(t, core)
			verifyInitStatus(i, true)
		}
	}

	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
	for i := range cluster.Cores {
		verifyInitStatus(i, true)
	}
}