package vault

import (
	"context"
	"crypto/ecdsa"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"crypto/subtle"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/errwrap"
	log "github.com/hashicorp/go-hclog"
	wrapping "github.com/hashicorp/go-kms-wrapping"
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/helper/metricsutil"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/internalshared/reloadutil"
	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/sdk/helper/certutil"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/jsonutil"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/helper/mlock"
	"github.com/hashicorp/vault/sdk/helper/strutil"
	"github.com/hashicorp/vault/sdk/helper/tlsutil"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
	sr "github.com/hashicorp/vault/serviceregistration"
	"github.com/hashicorp/vault/shamir"
	"github.com/hashicorp/vault/vault/cluster"
	"github.com/hashicorp/vault/vault/quotas"
	vaultseal "github.com/hashicorp/vault/vault/seal"
	"github.com/patrickmn/go-cache"
	uberAtomic "go.uber.org/atomic"
	"google.golang.org/grpc"
)

const (
	// CoreLockPath is the path used to acquire a coordinating lock
	// for a highly-available deploy.
	CoreLockPath = "core/lock"

	// The poison pill is used as a check during certain scenarios to indicate
	// to standby nodes that they should seal
	poisonPillPath = "core/poison-pill"

	// coreLeaderPrefix is the prefix used for the UUID that contains
	// the currently elected leader.
	coreLeaderPrefix = "core/leader/"

	// knownPrimaryAddrsPrefix is used to store last-known cluster address
	// information for primaries
	knownPrimaryAddrsPrefix = "core/primary-addrs/"

	// coreKeyringCanaryPath is used as a canary to indicate to replicated
	// clusters that they need to perform a rekey operation synchronously; this
	// isn't keyring-canary to avoid ignoring it when ignoring core/keyring
	coreKeyringCanaryPath = "core/canary-keyring"

	// indexHeaderHMACKeyPath is the storage path of the HMAC key used for
	// index headers
	indexHeaderHMACKeyPath = "core/index-header-hmac-key"
)

var (
	// ErrAlreadyInit is returned if the core is already
	// initialized. This prevents a re-initialization.
	ErrAlreadyInit = errors.New("Vault is already initialized")

	// ErrNotInit is returned if a non-initialized barrier
	// is attempted to be unsealed.
	ErrNotInit = errors.New("Vault is not initialized")

	// ErrInternalError is returned when we don't want to leak
	// any information about an internal error
	ErrInternalError = errors.New("internal error")

	// ErrHANotEnabled is returned if the operation only makes sense
	// in an HA setting
	ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")

	// manualStepDownSleepPeriod is how long to sleep after a user-initiated
	// step down of the active node, to prevent instantly regrabbing the lock.
	// It's var not const so that tests can manipulate it.
	manualStepDownSleepPeriod = 10 * time.Second

	// Functions only in the Enterprise version
	enterprisePostUnseal         = enterprisePostUnsealImpl
	enterprisePreSeal            = enterprisePreSealImpl
	enterpriseSetupFilteredPaths = enterpriseSetupFilteredPathsImpl
	enterpriseSetupQuotas        = enterpriseSetupQuotasImpl
	startReplication             = startReplicationImpl
	stopReplication              = stopReplicationImpl
	LastWAL                      = lastWALImpl
	LastPerformanceWAL           = lastPerformanceWALImpl
	PerformanceMerkleRoot        = merkleRootImpl
	DRMerkleRoot                 = merkleRootImpl
	LastRemoteWAL                = lastRemoteWALImpl
	LastRemoteUpstreamWAL        = lastRemoteUpstreamWALImpl
	WaitUntilWALShipped          = waitUntilWALShippedImpl
)

// NonFatalError is an error that can be returned during NewCore that should be
// displayed but not cause a program exit
type NonFatalError struct {
	Err error
}

func (e *NonFatalError) WrappedErrors() []error {
	return []error{e.Err}
}

func (e *NonFatalError) Error() string {
	return e.Err.Error()
}

// NewNonFatalError returns a new non-fatal error.
func NewNonFatalError(err error) *NonFatalError {
	return &NonFatalError{Err: err}
}

// IsFatalError returns true if the given error is a fatal error.
func IsFatalError(err error) bool {
	return !errwrap.ContainsType(err, new(NonFatalError))
}
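
// Illustrative only, not part of the original file: a hypothetical caller of
// NewCore can use IsFatalError to decide whether a returned error must abort
// startup, since a non-fatal error still yields a usable core:
//
//	core, err := NewCore(conf)
//	if err != nil {
//		if IsFatalError(err) {
//			return err // unrecoverable; abort startup
//		}
//		// A *NonFatalError should be displayed, but core is still usable.
//	}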

// ErrInvalidKey is returned if there is a user-based error with a provided
// unseal key. This will be shown to the user, so should not contain
// information that is sensitive.
type ErrInvalidKey struct {
	Reason string
}

func (e *ErrInvalidKey) Error() string {
	return fmt.Sprintf("invalid key: %v", e.Reason)
}
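
// Illustrative only, not part of the original file: since *ErrInvalidKey is a
// distinct error type, a caller holding an error returned directly (not
// wrapped) from an unseal operation could detect it with the standard library:
//
//	var invalid *ErrInvalidKey
//	if errors.As(err, &invalid) {
//		// Reason is safe to show to the user.
//		fmt.Println("unseal failed:", invalid.Reason)
//	}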

type RegisterAuthFunc func(context.Context, time.Duration, string, *logical.Auth) error

type activeAdvertisement struct {
	RedirectAddr     string                     `json:"redirect_addr"`
	ClusterAddr      string                     `json:"cluster_addr,omitempty"`
	ClusterCert      []byte                     `json:"cluster_cert,omitempty"`
	ClusterKeyParams *certutil.ClusterKeyParams `json:"cluster_key_params,omitempty"`
}

type unlockInformation struct {
	Parts [][]byte
	Nonce string
}

type raftInformation struct {
	challenge           *wrapping.EncryptedBlobInfo
	leaderClient        *api.Client
	leaderBarrierConfig *SealConfig
	nonVoter            bool
	joinInProgress      bool
}

type migrationInformation struct {
	// seal to use during a migration operation. It is the
	// seal we're migrating *from*.
	seal Seal

	// unsealKey was the unseal key provided for the migration seal.
	// This will be set as the recovery key when migrating from shamir to auto-seal.
	// We don't need to do anything with it when migrating auto->shamir because
	// we don't store the shamir combined key for shamir seals, nor when
	// migrating auto->auto because then the recovery key doesn't change.
	unsealKey []byte
}

// Core is used as the central manager of Vault activity. It is the primary point of
// interface for API handlers and is responsible for managing the logical and physical
// backends, router, security barrier, and audit trails.
type Core struct {
	entCore

	// The registry of builtin plugins is passed in here as an interface because
	// if it's used directly, it results in import cycles.
	builtinRegistry BuiltinRegistry

	// N.B.: This is used to populate a dev token down replication, as
	// otherwise, after replication is started, a dev would have to go through
	// the generate-root process simply to talk to the new follower cluster.
	devToken string

	// HABackend may be available depending on the physical backend
	ha physical.HABackend

	// storageType is the storage type set in the storage configuration
	storageType string

	// redirectAddr is the address we advertise as leader if held
	redirectAddr string
	// clusterAddr is the address we use for clustering
	clusterAddr *atomic.Value

	// physical backend is the un-trusted backend with durable data
	physical physical.Backend

	// serviceRegistration is the ServiceRegistration network
	serviceRegistration sr.ServiceRegistration

	// underlyingPhysical will always point to the underlying backend
	// implementation. This is an un-trusted backend with durable data
	underlyingPhysical physical.Backend

	// seal is our seal, for seal configuration information
	seal Seal

	// raftJoinDoneCh is used by the raft retry join routine to inform the unseal
	// process that the join is complete
	raftJoinDoneCh chan struct{}

	// postUnsealStarted informs the raft retry join routine that unseal key
	// validation is completed and post unseal has started so that it can complete
	// the join process when Shamir seal is in use
	postUnsealStarted *uint32

	// raftInfo will contain information required for this node to join as a
	// peer to an existing raft cluster
	raftInfo *raftInformation

	// migrationInfo is used during (and possibly after) a seal migration.
	// This contains information about the seal we are migrating *from*. Even
	// post seal migration, provided the old seal is still in configuration,
	// migrationInfo will be populated, which on enterprise may be necessary for
	// seal rewrap.
	migrationInfo     *migrationInformation
	sealMigrationDone *uint32

	// barrier is the security barrier wrapping the physical backend
	barrier SecurityBarrier

	// router is responsible for managing the mount points for logical backends.
	router *Router

	// logicalBackends is the mapping of backends to use for this core
	logicalBackends map[string]logical.Factory

	// credentialBackends is the mapping of backends to use for this core
	credentialBackends map[string]logical.Factory

	// auditBackends is the mapping of backends to use for this core
	auditBackends map[string]audit.Factory

	// stateLock protects mutable state
	stateLock DeadlockRWMutex
	sealed    *uint32

	standby              bool
	perfStandby          bool
	standbyDoneCh        chan struct{}
	standbyStopCh        *atomic.Value
	manualStepDownCh     chan struct{}
	keepHALockOnStepDown *uint32
	heldHALock           physical.Lock

	// shutdownDoneCh is used to notify when Shutdown() completes
	shutdownDoneCh chan struct{}

	// unlockInfo has the keys provided to Unseal until the threshold number of
	// parts is available, as well as the operation nonce
	unlockInfo *unlockInformation

	// generateRootProgress holds the shares until we reach enough
	// to verify the master key
	generateRootConfig   *GenerateRootConfig
	generateRootProgress [][]byte
	generateRootLock     sync.Mutex

	// These variables hold the config and shares we have until we reach
	// enough to verify the appropriate master key. Note that the same lock is
	// used; this isn't time-critical so this shouldn't be a problem.
	barrierRekeyConfig  *SealConfig
	recoveryRekeyConfig *SealConfig
	rekeyLock           sync.RWMutex

	// mounts is loaded after unseal since it is a protected
	// configuration
	mounts *MountTable

	// mountsLock is used to ensure that the mounts table does not
	// change underneath a calling function
	mountsLock sync.RWMutex

	// auth is loaded after unseal since it is a protected
	// configuration
	auth *MountTable

	// authLock is used to ensure that the auth table does not
	// change underneath a calling function
	authLock sync.RWMutex

	// audit is loaded after unseal since it is a protected
	// configuration
	audit *MountTable

	// auditLock is used to ensure that the audit table does not
	// change underneath a calling function
	auditLock sync.RWMutex

	// auditBroker is used to ingest the audit events and fan
	// out into the configured audit backends
	auditBroker *AuditBroker

	// auditedHeaders is used to configure which http headers
	// can be output in the audit logs
	auditedHeaders *AuditedHeadersConfig

	// systemBackend is the backend which is used to manage internal operations
	systemBackend *SystemBackend

	// cubbyholeBackend is the backend which manages the per-token storage
	cubbyholeBackend *CubbyholeBackend

	// systemBarrierView is the barrier view for the system backend
	systemBarrierView *BarrierView

	// expiration manager is used for managing LeaseIDs,
	// renewal, expiration and revocation
	expiration *ExpirationManager

	// rollback manager is used to run rollbacks periodically
	rollback *RollbackManager

	// policy store is used to manage named ACL policies
	policyStore *PolicyStore

	// token store is used to manage authentication tokens
	tokenStore *TokenStore

	// identityStore is used to manage client entities
	identityStore *IdentityStore

	// activityLog is used to track active client count
	activityLog *ActivityLog

	// metricsCh is used to stop the metrics streaming
	metricsCh chan struct{}

	// metricsMutex is used to prevent a race condition between
	// metrics emission and sealing leading to a nil pointer
	metricsMutex sync.Mutex

	// metricSink is the destination for all metrics that have
	// a cluster label.
	metricSink *metricsutil.ClusterMetricSink

	defaultLeaseTTL time.Duration
	maxLeaseTTL     time.Duration

	// baseLogger is used to avoid ResetNamed as it strips useful prefixes in
	// e.g. testing
	baseLogger log.Logger
	logger     log.Logger

	// sentinelTraceDisabled disables the trace display for Sentinel checks
	sentinelTraceDisabled bool

	// cachingDisabled indicates whether caches are disabled
	cachingDisabled bool
	// physicalCache stores the actual cache; we always have this but may
	// bypass it if disabled
	physicalCache physical.ToggleablePurgemonster

	// reloadFuncs is a map containing reload functions
	reloadFuncs map[string][]reloadutil.ReloadFunc
	// reloadFuncsLock controls access to the funcs
	reloadFuncsLock sync.RWMutex

	// wrappingJWTKey is the key used for generating JWTs containing response
	// wrapping information
	wrappingJWTKey *ecdsa.PrivateKey

	//
	// Cluster information
	//

	// Name
	clusterName string
	// ID
	clusterID uberAtomic.String
	// Specific cipher suites to use for clustering, if any
	clusterCipherSuites []uint16
	// Used to modify cluster parameters
	clusterParamsLock sync.RWMutex
	// The private key stored in the barrier used for establishing
	// mutually-authenticated connections between Vault cluster members
	localClusterPrivateKey *atomic.Value
	// The local cluster cert
	localClusterCert *atomic.Value
	// The parsed form of the local cluster cert
	localClusterParsedCert *atomic.Value
	// The TCP addresses we should use for clustering
	clusterListenerAddrs []*net.TCPAddr
	// The handler to use for request forwarding
	clusterHandler http.Handler
	// Write lock used to ensure that we don't have multiple connections adjust
	// this value at the same time
	requestForwardingConnectionLock sync.RWMutex
	// Lock for the leader values, ensuring we don't run the parts of Leader()
	// that change things concurrently
	leaderParamsLock sync.RWMutex
	// Current cluster leader values
	clusterLeaderParams *atomic.Value
	// Info on cluster members
	clusterPeerClusterAddrsCache *cache.Cache
	// The context for the client
	rpcClientConnContext context.Context
	// The function for canceling the client connection
	rpcClientConnCancelFunc context.CancelFunc
	// The grpc ClientConn for RPC calls
	rpcClientConn *grpc.ClientConn
	// The grpc forwarding client
	rpcForwardingClient *forwardingClient
	// The UUID used to hold the leader lock. Only set on active node
	leaderUUID string

	// CORS Information
	corsConfig *CORSConfig

	// The active set of upstream cluster addresses; stored via the Echo
	// mechanism, loaded by the balancer
	atomicPrimaryClusterAddrs  *atomic.Value
	atomicPrimaryFailoverAddrs *atomic.Value

	// replicationState keeps the current replication state cached for quick
	// lookup; activeNodeReplicationState stores the active value on standbys
	replicationState           *uint32
	activeNodeReplicationState *uint32

	// uiConfig contains UI configuration
	uiConfig *UIConfig

	// rawEnabled indicates whether the Raw endpoint is enabled
	rawEnabled bool

	// pluginDirectory is the location vault will look for plugin binaries
	pluginDirectory string
	// pluginCatalog is used to manage plugin configurations
	pluginCatalog *PluginCatalog

	enableMlock bool

	// This can be used to trigger operations to stop running when Vault is
	// going to be shut down, stepped down, or sealed
	activeContext           context.Context
	activeContextCancelFunc *atomic.Value

	// Stores the seal unwrapper for downgrade needs
	sealUnwrapper physical.Backend

	// unsealWithStoredKeysLock is a mutex that prevents multiple processes from
	// unsealing with stored keys at the same time.
	unsealWithStoredKeysLock sync.Mutex

	// Stores any funcs that should be run on successful postUnseal
	postUnsealFuncs []func()

	// Stores any funcs that should be run on successful barrier unseal in
	// recovery mode
	postRecoveryUnsealFuncs []func() error

	// replicationFailure is used to mark when replication has entered an
	// unrecoverable failure.
	replicationFailure *uint32

	// disablePerfStandby is used to tell a standby not to attempt to become a
	// perf standby
	disablePerfStandby bool

	licensingStopCh chan struct{}

	// Stores loggers so we can reset the level
	allLoggers     []log.Logger
	allLoggersLock sync.RWMutex

	// Can be toggled atomically to cause the core to never try to become
	// active, or give up active as soon as it gets it
	neverBecomeActive *uint32

	// loadCaseSensitiveIdentityStore enforces the loading of identity store
	// artifacts in a case sensitive manner. To be used only in testing.
	loadCaseSensitiveIdentityStore bool

	// clusterListener starts up and manages connections on the cluster ports
	clusterListener *atomic.Value

	// Telemetry objects
	metricsHelper *metricsutil.MetricsHelper

	// Stores request counters
	counters counters

	// raftFollowerStates tracks information about all the raft follower nodes.
	raftFollowerStates *raft.FollowerStates
	// Stop channel for raft TLS rotations
	raftTLSRotationStopCh chan struct{}
	// Stores the pending peers we are waiting to give answers
	pendingRaftPeers *sync.Map

	// rawConfig stores the config as-is from the provided server configuration.
	rawConfig *atomic.Value

	coreNumber int

	// secureRandomReader is the reader used for CSP operations
	secureRandomReader io.Reader

	recoveryMode bool

	clusterNetworkLayer cluster.NetworkLayer

	// PR1103disabled is used to test upgrade workflows: when set to true,
	// the correct behaviour for namespaced cubbyholes is disabled, so we
	// can test an upgrade to a version that includes the fixes from
	// https://github.com/hashicorp/vault-enterprise/pull/1103
	PR1103disabled bool

	quotaManager *quotas.Manager

	clusterHeartbeatInterval time.Duration

	activityLogConfig ActivityLogCoreConfig

	// activeTime is set on active nodes indicating the time at which this node
	// became active.
	activeTime time.Time

	// keyRotateGracePeriod is how long we allow an upgrade path
	// for standby instances before we delete the upgrade keys
	keyRotateGracePeriod *int64

	autoRotateCancel context.CancelFunc

	// number of workers to use for lease revocation in the expiration manager
	numExpirationWorkers int

	IndexHeaderHMACKey uberAtomic.Value

	// disableAutopilot is used to disable the autopilot subsystem in raft storage
	disableAutopilot bool
}

// CoreConfig is used to parameterize a core
type CoreConfig struct {
	entCoreConfig

	DevToken string

	BuiltinRegistry BuiltinRegistry

	LogicalBackends map[string]logical.Factory

	CredentialBackends map[string]logical.Factory

	AuditBackends map[string]audit.Factory

	Physical physical.Backend

	StorageType string

	// May be nil, which disables HA operations
	HAPhysical physical.HABackend

	ServiceRegistration sr.ServiceRegistration

	// Seal is the configured seal, or if none is configured explicitly, a
	// shamir seal. In migration scenarios this is the new seal.
	Seal Seal
	// UnwrapSeal is the optional seal marked "disabled"; this is the old
	// seal in migration scenarios.
	UnwrapSeal Seal

	SecureRandomReader io.Reader

	Logger log.Logger

	// Disables the trace display for Sentinel checks
	DisableSentinelTrace bool

	// Disables the LRU cache on the physical backend
	DisableCache bool

	// Disables mlock syscall
	DisableMlock bool

	// Custom cache size for the LRU cache on the physical backend, or zero for default
	CacheSize int

	// Set as the leader address for HA
	RedirectAddr string

	// Set as the cluster address for HA
	ClusterAddr string

	DefaultLeaseTTL time.Duration

	MaxLeaseTTL time.Duration

	ClusterName string

	ClusterCipherSuites string

	EnableUI bool

	// Enable the raw endpoint
	EnableRaw bool

	PluginDirectory string

	DisableSealWrap bool

	RawConfig *server.Config

	ReloadFuncs     *map[string][]reloadutil.ReloadFunc
	ReloadFuncsLock *sync.RWMutex

	// Licensing
	LicensingConfig *LicensingConfig
	// Don't set this unless in dev mode, ideally only when using inmem
	DevLicenseDuration time.Duration

	DisablePerformanceStandby bool
	DisableIndexing           bool
	DisableKeyEncodingChecks  bool

	AllLoggers []log.Logger

	// Telemetry objects
	MetricsHelper *metricsutil.MetricsHelper
	MetricSink    *metricsutil.ClusterMetricSink

	CounterSyncInterval time.Duration

	RecoveryMode bool

	ClusterNetworkLayer cluster.NetworkLayer

	ClusterHeartbeatInterval time.Duration

	// Activity log controls
	ActivityLogConfig ActivityLogCoreConfig

	// NumExpirationWorkers is the number of workers to use for lease
	// revocation in the expiration manager
	NumExpirationWorkers int

	// DisableAutopilot is used to disable the autopilot subsystem in raft storage
	DisableAutopilot bool
}

// GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
// not exist.
func (c *CoreConfig) GetServiceRegistration() sr.ServiceRegistration {
	// Check whether there is a ServiceRegistration explicitly configured
	if c.ServiceRegistration != nil {
		return c.ServiceRegistration
	}

	// Check if HAPhysical is configured and implements ServiceRegistration
	if c.HAPhysical != nil && c.HAPhysical.HAEnabled() {
		if disc, ok := c.HAPhysical.(sr.ServiceRegistration); ok {
			return disc
		}
	}

	// No service discovery is available.
	return nil
}
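
// Illustrative only, not part of the original file: the fallback above lets an
// HA backend double as the service-registration mechanism. Assuming a
// hypothetical haBackend that implements both physical.HABackend and
// sr.ServiceRegistration:
//
//	conf := &CoreConfig{HAPhysical: haBackend} // no explicit ServiceRegistration
//	if reg := conf.GetServiceRegistration(); reg != nil {
//		// reg is haBackend, acting as the registration mechanism
//	}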

// NewCore is used to construct a new core
func NewCore(conf *CoreConfig) (*Core, error) {
	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		if conf.RedirectAddr == "" {
			return nil, fmt.Errorf("missing API address, please set in configuration or via environment")
		}
	}

	if conf.DefaultLeaseTTL == 0 {
		conf.DefaultLeaseTTL = defaultLeaseTTL
	}
	if conf.MaxLeaseTTL == 0 {
		conf.MaxLeaseTTL = maxLeaseTTL
	}
	if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
		return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
	}

	// Validate the advertise addr if it's given to us
	if conf.RedirectAddr != "" {
		u, err := url.Parse(conf.RedirectAddr)
		if err != nil {
			return nil, errwrap.Wrapf("redirect address is not valid url: {{err}}", err)
		}

		if u.Scheme == "" {
			return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
		}
	}

	// Make a default logger if not provided
	if conf.Logger == nil {
		conf.Logger = logging.NewVaultLogger(log.Trace)
	}

	// Make a default metric sink if not provided
	if conf.MetricSink == nil {
		conf.MetricSink = metricsutil.BlackholeSink()
	}

	// Instantiate a non-nil raw config if none is provided
	if conf.RawConfig == nil {
		conf.RawConfig = new(server.Config)
	}

	syncInterval := conf.CounterSyncInterval
	if syncInterval.Nanoseconds() == 0 {
		syncInterval = 30 * time.Second
	}

	// secureRandomReader cannot be nil
	if conf.SecureRandomReader == nil {
		conf.SecureRandomReader = rand.Reader
	}

	clusterHeartbeatInterval := conf.ClusterHeartbeatInterval
	if clusterHeartbeatInterval == 0 {
		clusterHeartbeatInterval = 5 * time.Second
	}
	if conf.NumExpirationWorkers == 0 {
		conf.NumExpirationWorkers = numExpirationWorkersDefault
	}

	// Setup the core
	c := &Core{
		entCore:                      entCore{},
		devToken:                     conf.DevToken,
		physical:                     conf.Physical,
		serviceRegistration:          conf.GetServiceRegistration(),
		underlyingPhysical:           conf.Physical,
		storageType:                  conf.StorageType,
		redirectAddr:                 conf.RedirectAddr,
		clusterAddr:                  new(atomic.Value),
		clusterListener:              new(atomic.Value),
		seal:                         conf.Seal,
		router:                       NewRouter(),
		sealed:                       new(uint32),
		sealMigrationDone:            new(uint32),
		standby:                      true,
		standbyStopCh:                new(atomic.Value),
		baseLogger:                   conf.Logger,
		logger:                       conf.Logger.Named("core"),
		defaultLeaseTTL:              conf.DefaultLeaseTTL,
		maxLeaseTTL:                  conf.MaxLeaseTTL,
		sentinelTraceDisabled:        conf.DisableSentinelTrace,
		cachingDisabled:              conf.DisableCache,
		clusterName:                  conf.ClusterName,
		clusterNetworkLayer:          conf.ClusterNetworkLayer,
		clusterPeerClusterAddrsCache: cache.New(3*clusterHeartbeatInterval, time.Second),
		enableMlock:                  !conf.DisableMlock,
		rawEnabled:                   conf.EnableRaw,
		shutdownDoneCh:               make(chan struct{}),
		replicationState:             new(uint32),
		atomicPrimaryClusterAddrs:    new(atomic.Value),
		atomicPrimaryFailoverAddrs:   new(atomic.Value),
		localClusterPrivateKey:       new(atomic.Value),
		localClusterCert:             new(atomic.Value),
		localClusterParsedCert:       new(atomic.Value),
		activeNodeReplicationState:   new(uint32),
		keepHALockOnStepDown:         new(uint32),
		replicationFailure:           new(uint32),
		disablePerfStandby:           true,
		activeContextCancelFunc:      new(atomic.Value),
		allLoggers:                   conf.AllLoggers,
		builtinRegistry:              conf.BuiltinRegistry,
		neverBecomeActive:            new(uint32),
		clusterLeaderParams:          new(atomic.Value),
		metricsHelper:                conf.MetricsHelper,
		metricSink:                   conf.MetricSink,
		secureRandomReader:           conf.SecureRandomReader,
		rawConfig:                    new(atomic.Value),
		counters: counters{
			requests:     new(uint64),
			syncInterval: syncInterval,
		},
		recoveryMode:             conf.RecoveryMode,
		postUnsealStarted:        new(uint32),
		raftJoinDoneCh:           make(chan struct{}),
		clusterHeartbeatInterval: clusterHeartbeatInterval,
		activityLogConfig:        conf.ActivityLogConfig,
		keyRotateGracePeriod:     new(int64),
		numExpirationWorkers: conf.NumExpirationWorkers,
		raftFollowerStates:   raft.NewFollowerStates(),
		disableAutopilot:     conf.DisableAutopilot,
	}

	c.standbyStopCh.Store(make(chan struct{}))
	atomic.StoreUint32(c.sealed, 1)
	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)

	c.allLoggers = append(c.allLoggers, c.logger)

	c.router.logger = c.logger.Named("router")
	c.allLoggers = append(c.allLoggers, c.router.logger)

	c.SetConfig(conf.RawConfig)

	atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
	c.localClusterCert.Store(([]byte)(nil))
	c.localClusterParsedCert.Store((*x509.Certificate)(nil))
	c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))

	c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil))
	c.clusterAddr.Store(conf.ClusterAddr)
	c.activeContextCancelFunc.Store((context.CancelFunc)(nil))
	atomic.StoreInt64(c.keyRotateGracePeriod, int64(2*time.Minute))

	switch conf.ClusterCipherSuites {
	case "tls13", "tls12":
		// Do nothing, let Go use the default
	case "":
		// Add in forward compatible TLS 1.3 suites, followed by handpicked 1.2 suites
		c.clusterCipherSuites = []uint16{
			// 1.3
			tls.TLS_AES_128_GCM_SHA256,
			tls.TLS_AES_256_GCM_SHA384,
			tls.TLS_CHACHA20_POLY1305_SHA256,
			// 1.2
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		}
	default:
		suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
		if err != nil {
			return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err)
		}
		c.clusterCipherSuites = suites
	}
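
	// Illustrative only, not part of the original file: a custom suite list
	// reaches the default branch above as a comma-separated string of suite
	// names for tlsutil.ParseCiphers, e.g. (hypothetical config value):
	//
	//	conf.ClusterCipherSuites = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"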

	// Load CORS config and provide a value for the core field.
	c.corsConfig = &CORSConfig{
		core:    c,
		Enabled: new(uint32),
	}

	if c.seal == nil {
		c.seal = NewDefaultSeal(&vaultseal.Access{
			Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
				Logger: c.logger.Named("shamir"),
			}),
		})
	}
	c.seal.SetCore(c)

	if err := coreInit(c, conf); err != nil {
		return nil, err
	}

	if !conf.DisableMlock {
		// Ensure our memory usage is locked into physical RAM
		if err := mlock.LockMemory(); err != nil {
			return nil, fmt.Errorf(
				"Failed to lock memory: %v\n\n"+
					"This usually means that the mlock syscall is not available.\n"+
					"Vault uses mlock to prevent memory from being swapped to\n"+
					"disk. This requires root privileges as well as a machine\n"+
					"that supports mlock. Please enable mlock on your system or\n"+
					"disable Vault from using it. To disable Vault from using it,\n"+
					"set the `disable_mlock` configuration option in your configuration\n"+
					"file.",
				err)
		}
	}

	var err error

	// Construct a new AES-GCM barrier
	c.barrier, err = NewAESGCMBarrier(c.physical)
	if err != nil {
		return nil, errwrap.Wrapf("barrier setup failed: {{err}}", err)
	}

	// We create the funcs here, then populate the given config with it so that
	// the caller can share state
	conf.ReloadFuncsLock = &c.reloadFuncsLock
	c.reloadFuncsLock.Lock()
	c.reloadFuncs = make(map[string][]reloadutil.ReloadFunc)
	c.reloadFuncsLock.Unlock()
	conf.ReloadFuncs = &c.reloadFuncs

	// All the things happening below this are not required in
	// recovery mode
	if c.recoveryMode {
		return c, nil
	}

	if conf.PluginDirectory != "" {
		c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
		if err != nil {
			return nil, errwrap.Wrapf("core setup failed, could not verify plugin directory: {{err}}", err)
		}
	}

	createSecondaries(c, conf)

	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		c.ha = conf.HAPhysical
	}

	logicalBackends := make(map[string]logical.Factory)
	for k, f := range conf.LogicalBackends {
		logicalBackends[k] = f
	}

	_, ok := logicalBackends["kv"]
	if !ok {
		logicalBackends["kv"] = PassthroughBackendFactory
	}

	logicalBackends["cubbyhole"] = CubbyholeBackendFactory
	logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		sysBackendLogger := conf.Logger.Named("system")
		c.AddLogger(sysBackendLogger)
		b := NewSystemBackend(c, sysBackendLogger)
		if err := b.Setup(ctx, config); err != nil {
			return nil, err
		}
		return b, nil
	}

	logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		identityLogger := conf.Logger.Named("identity")
		c.AddLogger(identityLogger)
		return NewIdentityStore(ctx, c, config, identityLogger)
	}

	addExtraLogicalBackends(c, logicalBackends)
	c.logicalBackends = logicalBackends

	credentialBackends := make(map[string]logical.Factory)
	for k, f := range conf.CredentialBackends {
		credentialBackends[k] = f
	}
	credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		tsLogger := conf.Logger.Named("token")
		c.AddLogger(tsLogger)
		return NewTokenStore(ctx, tsLogger, c, config)
	}

	addExtraCredentialBackends(c, credentialBackends)
	c.credentialBackends = credentialBackends

	auditBackends := make(map[string]audit.Factory)
	for k, f := range conf.AuditBackends {
		auditBackends[k] = f
	}
	c.auditBackends = auditBackends

	uiStoragePrefix := systemBarrierPrefix + "ui"
	c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix))

	c.clusterListener.Store((*cluster.Listener)(nil))

	quotasLogger := conf.Logger.Named("quotas")
	c.allLoggers = append(c.allLoggers, quotasLogger)
	c.quotaManager, err = quotas.NewManager(quotasLogger, c.quotaLeaseWalker, c.metricSink)
	if err != nil {
		return nil, err
	}

	err = c.adjustForSealMigration(conf.UnwrapSeal)
	if err != nil {
		return nil, err
	}

	return c, nil
}
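// Example (illustrative sketch, not part of the original source): callers can
// register additional logical backends by populating CoreConfig.LogicalBackends
// before constructing the core; myBackendFactory is a hypothetical function
// that satisfies logical.Factory, and other required CoreConfig fields
// (storage, seal, logger, etc.) are omitted here:
//
//	conf := &CoreConfig{
//		LogicalBackends: map[string]logical.Factory{
//			"mybackend": func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
//				return myBackendFactory(ctx, config)
//			},
//		},
//	}
//	core, err := NewCore(conf)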
// Shutdown is invoked when the Vault instance is about to be terminated. It
// should not be accessible as part of an API call as it will cause an availability
// problem. It is only used to gracefully quit in the case of HA so that failover
// happens as quickly as possible.
func (c *Core) Shutdown() error {
	c.logger.Debug("shutdown called")
	err := c.sealInternal()

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	if c.shutdownDoneCh != nil {
		close(c.shutdownDoneCh)
		c.shutdownDoneCh = nil
	}

	return err
}

// ShutdownDone returns a channel that will be closed after Shutdown completes
func (c *Core) ShutdownDone() <-chan struct{} {
	return c.shutdownDoneCh
}
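// Example (illustrative only): a caller that started the core can block on
// ShutdownDone to learn when a graceful Shutdown has finished; `core` and
// `logger` here are assumed caller-side variables:
//
//	go func() {
//		<-core.ShutdownDone()
//		logger.Info("core shutdown complete, exiting")
//	}()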
// CORSConfig returns the current CORS configuration
func (c *Core) CORSConfig() *CORSConfig {
	return c.corsConfig
}

func (c *Core) GetContext() (context.Context, context.CancelFunc) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	return context.WithCancel(namespace.RootContext(c.activeContext))
}

// Sealed checks if the Vault is currently sealed
func (c *Core) Sealed() bool {
	return atomic.LoadUint32(c.sealed) == 1
}

// SecretProgress returns the number of keys provided so far
func (c *Core) SecretProgress() (int, string) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	switch c.unlockInfo {
	case nil:
		return 0, ""
	default:
		return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
	}
}

// ResetUnsealProcess removes the current unlock parts from memory, to reset
// the unsealing process
func (c *Core) ResetUnsealProcess() {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.unlockInfo = nil
}
// UnsealMigrate is like Unseal, but used to provide a key part during a
// seal migration.
func (c *Core) UnsealMigrate(key []byte) (bool, error) {
	err := c.unsealFragment(key, true)
	return !c.Sealed(), err
}

// Unseal is used to provide one of the key parts to unseal the Vault.
func (c *Core) Unseal(key []byte) (bool, error) {
	err := c.unsealFragment(key, false)
	return !c.Sealed(), err
}
// unsealFragment takes a key fragment and attempts to use it to unseal Vault.
// Vault may remain sealed afterwards even when no error is returned,
// depending on whether enough key fragments were provided to meet the
// target threshold.
//
// The provided key should be a recovery key fragment if the seal
// is an autoseal, or a regular seal key fragment for shamir. In
// migration scenarios "seal" in the preceding sentence refers to
// the migration seal in c.migrationInfo.seal.
//
// We use getUnsealKey to work out if we have enough fragments,
// and if we don't have enough we return early. Otherwise we get
// back the combined key.
//
// For legacy shamir the combined key *is* the master key. For
// shamir the combined key is used to decrypt the master key
// read from storage. For autoseal the combined key isn't used
// except to verify that the stored recovery key matches.
//
// In migration scenarios a side effect of unsealing is that
// the members of c.migrationInfo are populated (excluding
// .seal, which must already be populated before unseal is called).
func (c *Core) unsealFragment(key []byte, migrate bool) error {
	defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	ctx := context.Background()

	if migrate && c.migrationInfo == nil {
		return fmt.Errorf("can't perform a seal migration, no migration seal found")
	}

	if migrate && c.isRaftUnseal() {
		return fmt.Errorf("can't perform a seal migration while joining a raft cluster")
	}

	if !migrate && c.migrationInfo != nil {
		done, err := c.sealMigrated(ctx)
		if err != nil {
			return fmt.Errorf("error checking to see if seal is migrated: %w", err)
		}
		if !done {
			return fmt.Errorf("migrate option not provided and seal migration is pending")
		}
	}

	c.logger.Debug("unseal key supplied", "migrate", migrate)
	// Explicitly check for init status. This also checks if the seal
	// configuration is valid (i.e. non-nil).
	init, err := c.Initialized(ctx)
	if err != nil {
		return err
	}
	if !init && !c.isRaftUnseal() {
		return ErrNotInit
	}

	// Verify the key length
	min, max := c.barrier.KeyLength()
	max += shamir.ShareOverhead
	if len(key) < min {
		return &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
	}
	if len(key) > max {
		return &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
	}

	// Check if already unsealed
	if !c.Sealed() {
		return nil
	}

	sealToUse := c.seal
	if migrate {
		c.logger.Info("unsealing using migration seal")
		sealToUse = c.migrationInfo.seal
	}

	newKey, err := c.recordUnsealPart(key)
	if !newKey || err != nil {
		return err
	}

	// getUnsealKey returns either a recovery key (in the case of an autoseal)
	// or a master key (legacy shamir) or an unseal key (new-style shamir).
	combinedKey, err := c.getUnsealKey(ctx, sealToUse)
	if err != nil || combinedKey == nil {
		return err
	}
	if migrate {
		c.migrationInfo.unsealKey = combinedKey
	}

	if c.isRaftUnseal() {
		return c.unsealWithRaft(combinedKey)
	}
	masterKey, err := c.unsealKeyToMasterKeyPreUnseal(ctx, sealToUse, combinedKey)
	if err != nil {
		return err
	}
	return c.unsealInternal(ctx, masterKey)
}
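// Example (illustrative sketch): feeding key shares to Unseal until the
// threshold is met; `core` is an assumed *Core and `shares` an assumed slice
// of previously distributed unseal key fragments:
//
//	for _, share := range shares {
//		unsealed, err := core.Unseal(share)
//		if err != nil {
//			return err
//		}
//		if unsealed {
//			break // threshold reached, vault is unsealed
//		}
//	}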
func (c *Core) unsealWithRaft(combinedKey []byte) error {
	ctx := context.Background()

	if c.seal.BarrierType() == wrapping.Shamir {
		// If this is a legacy shamir seal this serves no purpose but it
		// doesn't hurt.
		err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(combinedKey)
		if err != nil {
			return err
		}
	}

	switch c.raftInfo.joinInProgress {
	case true:
		// JoinRaftCluster is already trying to perform a join based on retry_join configuration.
		// Inform that routine that unseal key validation is complete so that it can continue to
		// try and join possible leader nodes, and wait for it to complete.
		atomic.StoreUint32(c.postUnsealStarted, 1)

		c.logger.Info("waiting for raft retry join process to complete")
		<-c.raftJoinDoneCh

	default:
		// This is the case for manual raft join. Send the answer to the leader node and
		// wait for data to start streaming in.
		if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), c.raftInfo); err != nil {
			return err
		}
		// Reset the state
		c.raftInfo = nil
	}

	go func() {
		var masterKey []byte
		keyringFound := false
		// Wait until we at least have the keyring before we attempt to
		// unseal the node.
		for {
			if !keyringFound {
				keys, err := c.underlyingPhysical.List(ctx, keyringPrefix)
				if err != nil {
					c.logger.Error("failed to list physical keys", "error", err)
					return
				}
				if strutil.StrListContains(keys, "keyring") {
					keyringFound = true
				}
			}
			if keyringFound && len(masterKey) == 0 {
				var err error
				masterKey, err = c.unsealKeyToMasterKeyPreUnseal(ctx, c.seal, combinedKey)
				if err != nil {
					c.logger.Error("failed to read master key", "error", err)
					return
				}
			}
			if keyringFound && len(masterKey) > 0 {
				err := c.unsealInternal(ctx, masterKey)
				if err != nil {
					c.logger.Error("failed to unseal", "error", err)
				}
				return
			}
			time.Sleep(1 * time.Second)
		}
	}()

	return nil
}
// recordUnsealPart takes in a key fragment, and returns true if it's a new fragment.
func (c *Core) recordUnsealPart(key []byte) (bool, error) {
	// Check if we already have this piece
	if c.unlockInfo != nil {
		for _, existing := range c.unlockInfo.Parts {
			if subtle.ConstantTimeCompare(existing, key) == 1 {
				return false, nil
			}
		}
	} else {
		uuid, err := uuid.GenerateUUID()
		if err != nil {
			return false, err
		}
		c.unlockInfo = &unlockInformation{
			Nonce: uuid,
		}
	}

	// Store this key
	c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
	return true, nil
}
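// Note (illustrative): the duplicate check above deliberately uses
// crypto/subtle rather than bytes.Equal so the comparison time does not leak
// where two fragments first differ:
//
//	same := subtle.ConstantTimeCompare(a, b) == 1 // constant-time in len(a)
//	same = bytes.Equal(a, b)                      // may exit at first mismatch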
// getUnsealKey uses key fragments recorded by recordUnsealPart and
// returns the combined key if the key share threshold is met.
// If the key fragments are part of a recovery key, also verify that
// it matches the stored recovery key on disk.
func (c *Core) getUnsealKey(ctx context.Context, seal Seal) ([]byte, error) {
	var config *SealConfig
	var err error

	switch {
	case seal.RecoveryKeySupported():
		config, err = seal.RecoveryConfig(ctx)
	case c.isRaftUnseal():
		// Ignore the follower's seal config and refer to the leader's barrier
		// configuration.
		config = c.raftInfo.leaderBarrierConfig
	default:
		config, err = seal.BarrierConfig(ctx)
	}
	if err != nil {
		return nil, err
	}

	// Check if we have enough keys to unlock; proceed through the rest of
	// the call only if we have met the threshold
	if len(c.unlockInfo.Parts) < config.SecretThreshold {
		if c.logger.IsDebug() {
			c.logger.Debug("cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
		}
		return nil, nil
	}

	defer func() {
		c.unlockInfo = nil
	}()

	// Recover the split key. unsealKey is the shamir combined
	// key, or the single provided key if the threshold is 1.
	var unsealKey []byte
	if config.SecretThreshold == 1 {
		unsealKey = make([]byte, len(c.unlockInfo.Parts[0]))
		copy(unsealKey, c.unlockInfo.Parts[0])
	} else {
		unsealKey, err = shamir.Combine(c.unlockInfo.Parts)
		if err != nil {
			return nil, errwrap.Wrapf("failed to compute combined key: {{err}}", err)
		}
	}

	if seal.RecoveryKeySupported() {
		if err := seal.VerifyRecoveryKey(ctx, unsealKey); err != nil {
			return nil, err
		}
	}

	return unsealKey, nil
}
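// Example (illustrative, standalone sketch): the shamir split/combine round
// trip that the threshold logic above relies on, using the same shamir
// package imported by this file:
//
//	secret := []byte("example key material")
//	shares, err := shamir.Split(secret, 5, 3) // 5 shares, threshold of 3
//	if err != nil {
//		return err
//	}
//	recovered, err := shamir.Combine(shares[:3]) // any 3 shares reconstruct
//	// recovered is byte-for-byte equal to secret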
// sealMigrated must be called with the stateLock held. It returns true if
// the seal configured in HCL and the seal configured in storage match.
// For the auto->auto same-seal migration scenario, it will return false even
// when the preceding conditions hold, if we cannot decrypt the master key
// in storage using the configured seal.
func (c *Core) sealMigrated(ctx context.Context) (bool, error) {
	if atomic.LoadUint32(c.sealMigrationDone) == 1 {
		return true, nil
	}

	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return false, err
	}

	if existBarrierSealConfig.Type != c.seal.BarrierType() {
		return false, nil
	}
	if c.seal.RecoveryKeySupported() && existRecoverySealConfig.Type != c.seal.RecoveryType() {
		return false, nil
	}

	if c.seal.BarrierType() != c.migrationInfo.seal.BarrierType() {
		return true, nil
	}

	// The above checks can handle the auto->shamir and shamir->auto
	// and auto1->auto2 cases. For auto1->auto1, we need to actually try
	// to read and decrypt the keys.
	keysMig, errMig := c.migrationInfo.seal.GetStoredKeys(ctx)
	keys, err := c.seal.GetStoredKeys(ctx)

	switch {
	case len(keys) > 0 && err == nil:
		return true, nil
	case len(keysMig) > 0 && errMig == nil:
		return false, nil
	case errors.Is(err, &ErrDecrypt{}) && errors.Is(errMig, &ErrDecrypt{}):
		return false, fmt.Errorf("decrypt error, neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
	default:
		return false, fmt.Errorf("neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
	}
}
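// Note (illustrative): the errors.Is checks above rely on Go 1.13+ error
// wrapping; an error created with %w keeps its chain visible to errors.Is.
// A minimal standalone sketch of that mechanism:
//
//	var errBase = errors.New("base failure")
//	wrapped := fmt.Errorf("outer context: %w", errBase)
//	errors.Is(wrapped, errBase) // true
//
// Matching against &ErrDecrypt{} as above additionally assumes ErrDecrypt
// implements the Is method that errors.Is consults for custom targets.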
// migrateSeal must be called with the stateLock held.
func (c *Core) migrateSeal(ctx context.Context) error {
	if c.migrationInfo == nil {
		return nil
	}

	ok, err := c.sealMigrated(ctx)
	if err != nil {
		return fmt.Errorf("error checking if seal is migrated or not: %w", err)
	}
	if ok {
		c.logger.Info("migration is already performed")
		return nil
	}

	c.logger.Info("seal migration initiated")

	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		c.logger.Info("migrating from one auto-unseal to another", "from",
			c.migrationInfo.seal.BarrierType(), "to", c.seal.BarrierType())

		// Set the recovery and barrier keys to be the same.
		recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx)
		if err != nil {
			return errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err)
		}

		if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil {
			return errwrap.Wrapf("error setting new recovery key information during migrate: {{err}}", err)
		}

		barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx)
		if err != nil {
			return errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err)
		}

		if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
			return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
		}

	case c.migrationInfo.seal.RecoveryKeySupported():
		c.logger.Info("migrating from one auto-unseal to shamir", "from", c.migrationInfo.seal.BarrierType())
		// Auto to Shamir, since recovery key isn't supported on new seal

		recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx)
		if err != nil {
			return errwrap.Wrapf("error getting recovery key to set on new seal: {{err}}", err)
		}

		// We have recovery keys; we're going to use them as the new shamir KeK.
		err = c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveryKey)
		if err != nil {
			return errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
		}

		barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx)
		if err != nil {
			return errwrap.Wrapf("error getting stored keys to set on new seal: {{err}}", err)
		}
		if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
			return errwrap.Wrapf("error setting new barrier key information during migrate: {{err}}", err)
		}

	case c.seal.RecoveryKeySupported():
		c.logger.Info("migrating from shamir to auto-unseal", "to", c.seal.BarrierType())
		// Migration is happening from shamir -> auto. In this case use the shamir
		// combined key that was used to store the master key as the new recovery key.
		if err := c.seal.SetRecoveryKey(ctx, c.migrationInfo.unsealKey); err != nil {
			return errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
		}

		// Generate a new master key
		newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader)
		if err != nil {
			return errwrap.Wrapf("error generating new master key: {{err}}", err)
		}

		// Rekey the barrier. This handles the case where the shamir seal we're
		// migrating from was a legacy seal without a stored master key.
		if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
			return errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
		}

		// Store the new master key
		if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
			return errwrap.Wrapf("error storing new master key: {{err}}", err)
		}

	default:
		return errors.New("unhandled migration case (shamir to shamir)")
	}

	err = c.migrateSealConfig(ctx)
	if err != nil {
		return errwrap.Wrapf("error storing new seal configs: {{err}}", err)
	}

	// Flag migration performed for seal-rewrap later
	atomic.StoreUint32(c.sealMigrationDone, 1)

	c.logger.Info("seal migration complete")
	return nil
}
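// Note (illustrative, operator-facing): a migration away from an autoseal is
// typically triggered from the server config by marking the old seal
// disabled (see the adjustForSealMigration comment later in this file), then
// unsealing with the migrate flag; a sketch of such a config stanza:
//
//	seal "awskms" {
//	  disabled   = "true"
//	  kms_key_id = "..."
//	}
//
//	$ vault operator unseal -migrate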
// unsealInternal takes in the master key and attempts to unseal the barrier.
// N.B.: This must be called with the state write lock held.
func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) error {
	// Attempt to unlock
	if err := c.barrier.Unseal(ctx, masterKey); err != nil {
		return err
	}

	if err := preUnsealInternal(ctx, c); err != nil {
		return err
	}

	if err := c.startClusterListener(ctx); err != nil {
		return err
	}

	if err := c.startRaftBackend(ctx); err != nil {
		return err
	}

	if err := c.setupReplicationResolverHandler(); err != nil {
		c.logger.Warn("failed to start replication resolver server", "error", err)
	}

	// Do post-unseal setup if HA is not enabled
	if c.ha == nil {
		// We still need to set up cluster info even if it's not part of a
		// cluster right now. This also populates the cached cluster object.
		if err := c.setupCluster(ctx); err != nil {
			c.logger.Error("cluster setup failed", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		if err := c.migrateSeal(ctx); err != nil {
			c.logger.Error("seal migration error", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil))
		if err := c.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil {
			c.logger.Error("post-unseal setup failed", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		// Force a cache bust here, which will also run migration code
		if c.seal.RecoveryKeySupported() {
			c.seal.SetRecoveryConfig(ctx, nil)
		}

		c.standby = false
	} else {
		// Go to standby mode, wait until we are active to unseal
		c.standbyDoneCh = make(chan struct{})
		c.manualStepDownCh = make(chan struct{}, 1)
		c.standbyStopCh.Store(make(chan struct{}))
		go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh.Load().(chan struct{}))
	}

	// Success!
	atomic.StoreUint32(c.sealed, 0)
	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 1, nil)

	if c.logger.IsInfo() {
		c.logger.Info("vault is unsealed")
	}

	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifySealedStateChange(false); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify unsealed status", "error", err)
			}
		}
		if err := c.serviceRegistration.NotifyInitializedStateChange(true); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify initialized status", "error", err)
			}
		}
	}

	return nil
}
// SealWithRequest takes in a logical.Request, acquires the lock, and passes
// through to sealInternal
func (c *Core) SealWithRequest(httpCtx context.Context, req *logical.Request) error {
	defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())

	if c.Sealed() {
		return nil
	}

	c.stateLock.RLock()

	// We use background context since we may not be active
	ctx, cancel := context.WithCancel(namespace.RootContext(nil))
	defer cancel()

	go func() {
		select {
		case <-ctx.Done():
		case <-httpCtx.Done():
			cancel()
		}
	}()

	// This will unlock the read lock
	return c.sealInitCommon(ctx, req)
}

// Seal takes in a token and creates a logical.Request, acquires the lock, and
// passes through to sealInternal
func (c *Core) Seal(token string) error {
	defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())

	if c.Sealed() {
		return nil
	}

	c.stateLock.RLock()

	req := &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/seal",
		ClientToken: token,
	}

	// This will unlock the read lock
	// We use background context since we may not be active
	return c.sealInitCommon(namespace.RootContext(nil), req)
}
// sealInitCommon is common logic for Seal and SealWithRequest and is used to
// re-seal the Vault. This requires the Vault to be unsealed again to perform
// any further operations. Note: this function will read-unlock the state lock.
func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())

	var unlocked bool
	defer func() {
		if !unlocked {
			c.stateLock.RUnlock()
		}
	}()

	if req == nil {
		retErr = multierror.Append(retErr, errors.New("nil request to seal"))
		return retErr
	}

	// Since there is no token store in standby nodes, sealing cannot be done.
	// Ideally, the request has to be forwarded to the leader node for validation
	// and the operation should be performed. But for now, just returning with
	// an error and recommending a vault restart, which essentially does the
	// same thing.
	if c.standby {
		c.logger.Error("vault cannot seal when in standby mode; please restart instead")
		retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
		return retErr
	}

	acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req)
	if err != nil {
		retErr = multierror.Append(retErr, err)
		return retErr
	}

	// Audit-log the request before going any further
	auth := &logical.Auth{
		ClientToken: req.ClientToken,
		Accessor:    req.ClientTokenAccessor,
	}
	if te != nil {
		auth.IdentityPolicies = identityPolicies[te.NamespaceID]
		delete(identityPolicies, te.NamespaceID)
		auth.ExternalNamespacePolicies = identityPolicies
		auth.TokenPolicies = te.Policies
		auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
		auth.Metadata = te.Meta
		auth.DisplayName = te.DisplayName
		auth.EntityID = te.EntityID
		auth.TokenType = te.Type
	}

	logInput := &logical.LogInput{
		Auth:    auth,
		Request: req,
	}
	if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
		c.logger.Error("failed to audit request", "request_path", req.Path, "error", err)
		retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
		return retErr
	}

	if entity != nil && entity.Disabled {
		c.logger.Warn("permission denied as the entity on the token is disabled")
		retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		return retErr
	}
	if te != nil && te.EntityID != "" && entity == nil {
		c.logger.Warn("permission denied as the entity on the token is invalid")
		retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		return retErr
	}

	// Attempt to use the token (decrement num_uses)
	// On error bail out; if the token has been revoked, bail out too
	if te != nil {
		te, err = c.tokenStore.UseToken(ctx, te)
		if err != nil {
			c.logger.Error("failed to use token", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return retErr
		}
		if te == nil {
			// Token is no longer valid
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
			return retErr
		}
	}

	// Verify that this operation is allowed
	authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
		RootPrivsRequired: true,
	})
	if !authResults.Allowed {
		retErr = multierror.Append(retErr, authResults.Error)
		if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		}
		return retErr
	}

	if te != nil && te.NumUses == tokenRevocationPending {
		// Token needs to be revoked. We do this immediately here because
		// we won't have a token store after sealing.
		leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te)
		if err == nil {
			err = c.expiration.Revoke(c.activeContext, leaseID)
		}
		if err != nil {
			c.logger.Error("token needed revocation before seal but failed to revoke", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
		}
	}

	// Unlock; sealing will grab the lock when needed
	unlocked = true
	c.stateLock.RUnlock()

	sealErr := c.sealInternal()

	if sealErr != nil {
		retErr = multierror.Append(retErr, sealErr)
	}

	return
}
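// Note (illustrative): the retErr accumulation in sealInitCommon relies on
// go-multierror's nil-safe Append, which lets failures pile up without a nil
// check at each step:
//
//	var retErr error // nil until the first Append
//	retErr = multierror.Append(retErr, errors.New("first failure"))
//	retErr = multierror.Append(retErr, errors.New("second failure"))
//	// retErr.Error() now reports both messages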
// UIEnabled returns if the UI is enabled
func (c *Core) UIEnabled() bool {
	return c.uiConfig.Enabled()
}

// UIHeaders returns configured UI headers
func (c *Core) UIHeaders() (http.Header, error) {
	return c.uiConfig.Headers(context.Background())
}
// sealInternal is an internal method used to seal the vault. It does not do
// any authorization checking.
func (c *Core) sealInternal() error {
	return c.sealInternalWithOptions(true, false, true)
}

func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock, performCleanup bool) error {
	// Mark sealed, and if already marked return
	if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
		return nil
	}
	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)

	c.logger.Info("marked as sealed")

	// Clear forwarding clients
	c.requestForwardingConnectionLock.Lock()
	c.clearForwardingClients()
	c.requestForwardingConnectionLock.Unlock()

	activeCtxCancel := c.activeContextCancelFunc.Load().(context.CancelFunc)
	cancelCtxAndLock := func() {
		doneCh := make(chan struct{})
		go func() {
			select {
			case <-doneCh:
			// Attempt to drain any inflight requests
			case <-time.After(DefaultMaxRequestDuration):
				if activeCtxCancel != nil {
					activeCtxCancel()
				}
			}
		}()

		c.stateLock.Lock()
		close(doneCh)

		// Stop requests from processing
		if activeCtxCancel != nil {
			activeCtxCancel()
		}
	}

	// Do pre-seal teardown if HA is not enabled
	if c.ha == nil {
		if grabStateLock {
			cancelCtxAndLock()
			defer c.stateLock.Unlock()
		}
		// Even in a non-HA context we key off of this for some things
		c.standby = true

		// Stop requests from processing
		if activeCtxCancel != nil {
			activeCtxCancel()
		}

		if err := c.preSeal(); err != nil {
			c.logger.Error("pre-seal teardown failed", "error", err)
			return fmt.Errorf("internal error")
		}
	} else {
		// If we are keeping the lock we already have the state write lock
		// held. Otherwise grab it here so that when stopCh is triggered we are
		// locked.
		if keepHALock {
			atomic.StoreUint32(c.keepHALockOnStepDown, 1)
		}
		if grabStateLock {
			cancelCtxAndLock()
			defer c.stateLock.Unlock()
		}

		// If we are trying to acquire the lock, force it to return with nil so
		// runStandby will exit
		// If we are active, signal the standby goroutine to shut down and wait
		// for completion. We have the state lock here so nothing else should
		// be toggling standby status.
		close(c.standbyStopCh.Load().(chan struct{}))
		c.logger.Debug("finished triggering standbyStopCh for runStandby")

		// Wait for runStandby to stop
		<-c.standbyDoneCh
		atomic.StoreUint32(c.keepHALockOnStepDown, 0)
		c.logger.Debug("runStandby done")
	}

	c.teardownReplicationResolverHandler()

	// Perform additional cleanup upon sealing.
	if performCleanup {
		if raftBackend := c.getRaftBackend(); raftBackend != nil {
			if err := raftBackend.TeardownCluster(c.getClusterListener()); err != nil {
				c.logger.Error("error stopping storage cluster", "error", err)
				return err
			}
		}

		// Stop the cluster listener
		c.stopClusterListener()
	}

	c.logger.Debug("sealing barrier")
	if err := c.barrier.Seal(); err != nil {
		c.logger.Error("error sealing barrier", "error", err)
		return err
	}

	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifySealedStateChange(true); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify sealed status", "error", err)
			}
		}
	}

	if c.quotaManager != nil {
		if err := c.quotaManager.Reset(); err != nil {
			c.logger.Error("error resetting quota manager", "error", err)
		}
	}

	postSealInternal(c)

	c.logger.Info("vault is sealed")

	return nil
}
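// Note (illustrative): the CompareAndSwapUint32 at the top of
// sealInternalWithOptions makes sealing idempotent under concurrent callers;
// exactly one goroutine wins the 0 -> 1 transition and performs the teardown:
//
//	if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
//		return nil // another caller is already sealing / has sealed
//	}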
type UnsealStrategy interface {
	unseal(context.Context, log.Logger, *Core) error
}

type standardUnsealStrategy struct{}

func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c *Core) error {
	// Clear forwarding clients; we're active
	c.requestForwardingConnectionLock.Lock()
	c.clearForwardingClients()
	c.requestForwardingConnectionLock.Unlock()

	// Mark the active time. We do this first so it can be correlated to the logs
	// for the active startup.
	c.activeTime = time.Now().UTC()

	if err := postUnsealPhysical(c); err != nil {
		return err
	}

	if err := enterprisePostUnseal(c, false); err != nil {
		return err
	}
	if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
		// Only perf primaries should write feature flags, but we do it by
		// excluding other states so that we don't have to change it when
		// a non-replicated cluster becomes a primary.
		if err := c.persistFeatureFlags(ctx); err != nil {
			return err
		}
	}

	if c.autoRotateCancel == nil {
		var autoRotateCtx context.Context
		autoRotateCtx, c.autoRotateCancel = context.WithCancel(c.activeContext)
		go c.autoRotateBarrierLoop(autoRotateCtx)
	}

	if !c.IsDRSecondary() {
		if err := c.ensureWrappingKey(ctx); err != nil {
			return err
		}
	}
	if err := c.setupPluginCatalog(ctx); err != nil {
		return err
	}
	if err := c.loadMounts(ctx); err != nil {
		return err
	}
	if err := enterpriseSetupFilteredPaths(c); err != nil {
		return err
	}
	if err := c.setupMounts(ctx); err != nil {
		return err
	}
	if err := c.setupPolicyStore(ctx); err != nil {
		return err
	}
	if err := c.loadCORSConfig(ctx); err != nil {
		return err
	}
	if err := c.loadCurrentRequestCounters(ctx, time.Now()); err != nil {
		return err
	}
	if err := c.loadCredentials(ctx); err != nil {
		return err
	}
	if err := enterpriseSetupFilteredPaths(c); err != nil {
		return err
	}
	if err := c.setupCredentials(ctx); err != nil {
		return err
	}
	if err := c.setupQuotas(ctx, false); err != nil {
		return err
	}
	if !c.IsDRSecondary() {
		if err := c.startRollback(); err != nil {
			return err
		}
		var expirationStrategy ExpireLeaseStrategy
		if os.Getenv("VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY") != "" {
			expirationStrategy = expireLeaseStrategyRevoke
		} else {
			expirationStrategy = expireLeaseStrategyFairsharing
		}
		if err := c.setupExpiration(expirationStrategy); err != nil {
			return err
		}
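		// Note (illustrative): operators can opt back into the legacy
		// one-at-a-time lease revocation behavior by exporting the variable
		// checked above before starting the server, e.g.:
		//
		//	VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY=1 vault server -config=config.hcl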
		if err := c.loadAudits(ctx); err != nil {
			return err
		}
		if err := c.setupAudits(ctx); err != nil {
			return err
		}
		if err := c.loadIdentityStoreArtifacts(ctx); err != nil {
			return err
		}
		if err := loadMFAConfigs(ctx, c); err != nil {
			return err
		}
		if err := c.setupAuditedHeadersConfig(ctx); err != nil {
			return err
		}

		// Not waiting on wg to avoid changing existing behavior
		var wg sync.WaitGroup
		if err := c.setupActivityLog(ctx, &wg); err != nil {
			return err
		}
	} else {
		c.auditBroker = NewAuditBroker(c.logger)
	}

	if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
		// Cannot do this above, as we need other resources like mounts to be setup
		if err := c.setupPluginReload(); err != nil {
			return err
		}
	}

	if c.getClusterListener() != nil && (c.ha != nil || shouldStartClusterListener(c)) {
		if err := c.setupRaftActiveNode(ctx); err != nil {
			return err
		}

		if err := c.startForwarding(ctx); err != nil {
			return err
		}
	}

	c.clusterParamsLock.Lock()
	defer c.clusterParamsLock.Unlock()
	if err := startReplication(c); err != nil {
		return err
	}

	return nil
}
// postUnseal is invoked on the active node, and on performance standby nodes,
// after the barrier is unsealed, but before allowing any user operations.
// This allows us to set up any state that requires the Vault to be unsealed
// such as mount tables, logical backends, credential stores, etc.
func (c *Core) postUnseal(ctx context.Context, ctxCancelFunc context.CancelFunc, unsealer UnsealStrategy) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())

	// Clear any outstanding post-unseal funcs
	c.postUnsealFuncs = nil

	// Create a new request context
	c.activeContext = ctx
	c.activeContextCancelFunc.Store(ctxCancelFunc)

	defer func() {
		if retErr != nil {
			ctxCancelFunc()
			c.preSeal()
		}
	}()
	c.logger.Info("post-unseal setup starting")

	// Enable the cache
	c.physicalCache.Purge(ctx)
	if !c.cachingDisabled {
		c.physicalCache.SetEnabled(true)
	}

	// Purge these for safety in case of a rekey
	c.seal.SetBarrierConfig(ctx, nil)
	if c.seal.RecoveryKeySupported() {
		c.seal.SetRecoveryConfig(ctx, nil)
	}

	if err := unsealer.unseal(ctx, c.logger, c); err != nil {
		return err
	}

	// Automatically re-encrypt the keys used for auto unsealing when the
	// seal's encryption key changes. The regular rotation of cryptographic
	// keys is a NIST recommendation. Access to prior keys for decryption
	// is normally supported for a configurable time period. Re-encrypting
	// the keys used for auto unsealing ensures Vault and its data will
	// continue to be accessible even after prior seal keys are destroyed.
	if seal, ok := c.seal.(*autoSeal); ok {
		if err := seal.UpgradeKeys(c.activeContext); err != nil {
			c.logger.Warn("post-unseal upgrade seal keys failed", "error", err)
		}
	}

	c.metricsCh = make(chan struct{})
	go c.emitMetrics(c.metricsCh)

	// This is intentionally the last block in this function. We want to allow
	// writes just before allowing client requests, to ensure everything has
	// been set up properly before any writes can have happened.
	for _, v := range c.postUnsealFuncs {
		v()
	}

	if atomic.LoadUint32(c.sealMigrationDone) == 1 {
		if err := c.postSealMigration(ctx); err != nil {
			c.logger.Warn("post-unseal post seal migration failed", "error", err)
		}
	}

	c.logger.Info("post-unseal setup complete")
	return nil
}
// preSeal is invoked before the barrier is sealed, allowing
// for any state teardown required.
func (c *Core) preSeal() error {
	defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
	c.logger.Info("pre-seal teardown starting")

	// Clear any pending funcs
	c.postUnsealFuncs = nil

	c.activeTime = time.Time{}

	// Clear any rekey progress
	c.barrierRekeyConfig = nil
	c.recoveryRekeyConfig = nil

	if c.metricsCh != nil {
		close(c.metricsCh)
		c.metricsCh = nil
	}

	var result error

	c.stopForwarding()

	c.stopRaftActiveNode()

	c.clusterParamsLock.Lock()
	if err := stopReplication(c); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err))
	}
	c.clusterParamsLock.Unlock()

	if err := c.teardownAudits(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
	}
	if err := c.stopExpiration(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
	}
	if err := c.stopActivityLog(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error stopping activity log: {{err}}", err))
	}
	if err := c.teardownCredentials(context.Background()); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
	}
	if err := c.teardownPolicyStore(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
	}
	if err := c.stopRollback(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
	}
	if err := c.unloadMounts(context.Background()); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
	}
	if err := enterprisePreSeal(c); err != nil {
		result = multierror.Append(result, err)
	}

	if c.autoRotateCancel != nil {
		c.autoRotateCancel()
		c.autoRotateCancel = nil
	}

	preSealPhysical(c)

	c.logger.Info("pre-seal teardown complete")
	return result
}
func enterprisePostUnsealImpl(c *Core, isStandby bool) error {
	return nil
}

func enterprisePreSealImpl(c *Core) error {
	return nil
}

func enterpriseSetupFilteredPathsImpl(c *Core) error {
	return nil
}

func enterpriseSetupQuotasImpl(ctx context.Context, c *Core) error {
	return nil
}

func startReplicationImpl(c *Core) error {
	return nil
}

func stopReplicationImpl(c *Core) error {
	return nil
}

func (c *Core) ReplicationState() consts.ReplicationState {
	return consts.ReplicationState(atomic.LoadUint32(c.replicationState))
}

func (c *Core) ActiveNodeReplicationState() consts.ReplicationState {
	return consts.ReplicationState(atomic.LoadUint32(c.activeNodeReplicationState))
}

func (c *Core) SealAccess() *SealAccess {
	return NewSealAccess(c.seal)
}

// StorageType returns a string equal to the storage configuration's type.
func (c *Core) StorageType() string {
	return c.storageType
}

func (c *Core) Logger() log.Logger {
	return c.logger
}

func (c *Core) BarrierKeyLength() (min, max int) {
	min, max = c.barrier.KeyLength()
	max += shamir.ShareOverhead
	return
}

func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig {
	return c.auditedHeaders
}

func waitUntilWALShippedImpl(ctx context.Context, c *Core, index uint64) bool {
	return true
}

func merkleRootImpl(c *Core) string {
	return ""
}

func lastWALImpl(c *Core) uint64 {
	return 0
}

func lastPerformanceWALImpl(c *Core) uint64 {
	return 0
}

func lastRemoteWALImpl(c *Core) uint64 {
	return 0
}

func lastRemoteUpstreamWALImpl(c *Core) uint64 {
	return 0
}

func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) {
	pe, err := c.physical.Get(ctx, barrierSealConfigPath)
	if err != nil {
		return nil, nil, errwrap.Wrapf("failed to fetch barrier seal configuration at migration check time: {{err}}", err)
	}
	if pe == nil {
		return nil, nil, nil
	}

	barrierConf := new(SealConfig)
	if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil {
		return nil, nil, errwrap.Wrapf("failed to decode barrier seal configuration at migration check time: {{err}}", err)
	}
	err = barrierConf.Validate()
	if err != nil {
		return nil, nil, errwrap.Wrapf("failed to validate barrier seal configuration at migration check time: {{err}}", err)
	}
	// In older versions of vault the default seal would not store a type. This
	// is here to offer backwards compatibility for older seal configs.
	if barrierConf.Type == "" {
		barrierConf.Type = wrapping.Shamir
	}

	var recoveryConf *SealConfig
	pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath)
	if err != nil {
		return nil, nil, errwrap.Wrapf("failed to fetch seal configuration at migration check time: {{err}}", err)
	}
	if pe != nil {
		recoveryConf = &SealConfig{}
		if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil {
			return nil, nil, errwrap.Wrapf("failed to decode seal configuration at migration check time: {{err}}", err)
		}
		err = recoveryConf.Validate()
		if err != nil {
			return nil, nil, errwrap.Wrapf("failed to validate seal configuration at migration check time: {{err}}", err)
		}
		// In older versions of vault the default seal would not store a type. This
		// is here to offer backwards compatibility for older seal configs.
		if recoveryConf.Type == "" {
			recoveryConf.Type = wrapping.Shamir
		}
	}

	return barrierConf, recoveryConf, nil
}
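
// Example (hypothetical sketch): distinguishing the three outcomes of
// PhysicalSealConfigs. A nil barrier config with a nil error means no seal
// configuration has been stored yet (an uninitialized cluster).
//
//	barrierConf, recoveryConf, err := c.PhysicalSealConfigs(ctx)
//	switch {
//	case err != nil:
//		// storage read/decode/validate failure
//	case barrierConf == nil:
//		// uninitialized: no barrier seal config in storage
//	case recoveryConf == nil:
//		// initialized, but no recovery seal config stored
//	}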

// adjustForSealMigration takes the unwrapSeal, which is nil if (a) we're not
// configured for seal migration or (b) we might be doing a seal migration away
// from shamir. It will only be non-nil if there is a configured seal with
// the config key disabled=true, which implies a migration away from autoseal.
//
// For case (a), the common case, we expect that the stored barrier
// config matches the seal type, in which case we simply return nil. If they
// don't match, and the stored seal config is of type Shamir but the configured
// seal is not Shamir, that is case (b) and we make an unwrapSeal of type Shamir.
// Any other unwrapSeal=nil scenario is treated as an error.
//
// Given a non-nil unwrapSeal or case (b), we set up c.migrationInfo to prepare
// for a migration upon receiving a valid migration unseal request. We cannot
// check at this time for already performed (or incomplete) migrations because
// we haven't yet been unsealed, so we have no way of checking whether a
// shamir seal works to read stored seal-encrypted data.
//
// The assumption throughout is that the very last step of seal migration is
// to write the new barrier/recovery stored seal config.
func (c *Core) adjustForSealMigration(unwrapSeal Seal) error {
	ctx := context.Background()
	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return fmt.Errorf("Error checking for existing seal: %s", err)
	}

	// If we don't have an existing config or if it's the deprecated auto seal
	// which needs an upgrade, skip out
	if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
		return nil
	}

	if unwrapSeal == nil {
		// With unwrapSeal==nil, either we're not migrating, or we're migrating
		// from shamir.
		switch {
		case existBarrierSealConfig.Type == c.seal.BarrierType():
			// We have the same barrier type and the unwrap seal is nil so we're
			// not migrating from same to same, IOW we assume it's not a migration.
			return nil
		case c.seal.BarrierType() == wrapping.Shamir:
			// The stored barrier config is not shamir, there is no disabled seal
			// in config, and either no configured seal (which equates to Shamir)
			// or an explicitly configured Shamir seal.
			return fmt.Errorf("cannot seal migrate from %q to Shamir, no disabled seal in configuration",
				existBarrierSealConfig.Type)
		case existBarrierSealConfig.Type == wrapping.Shamir:
			// The configured seal is not Shamir, the stored seal config is Shamir.
			// This is a migration away from Shamir.
			unwrapSeal = NewDefaultSeal(&vaultseal.Access{
				Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
					Logger: c.logger.Named("shamir"),
				}),
			})
		default:
			// We know at this point that there is a configured non-Shamir seal,
			// that it does not match the stored non-Shamir seal config, and that
			// there is no explicit disabled seal stanza.
			return fmt.Errorf("cannot seal migrate from %q to %q, no disabled seal in configuration",
				existBarrierSealConfig.Type, c.seal.BarrierType())
		}
	} else {
		// If we're not coming from Shamir we expect the previous seal to be
		// in the config and disabled.
		if unwrapSeal.BarrierType() == wrapping.Shamir {
			return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
		}
	}

	// If we've reached this point it's a migration attempt and we should have
	// both c.migrationInfo.seal (old seal) and c.seal (new seal) populated.
	unwrapSeal.SetCore(c)

	// No stored recovery seal config found, what about the legacy recovery config?
	if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
		entry, err := c.physical.Get(ctx, recoverySealConfigPath)
		if err != nil {
			return errwrap.Wrapf(fmt.Sprintf("failed to read %q recovery seal configuration: {{err}}", existBarrierSealConfig.Type), err)
		}
		if entry == nil {
			return errors.New("Recovery seal configuration not found for existing seal")
		}
		return errors.New("Cannot migrate seals while using a legacy recovery seal config")
	}

	c.migrationInfo = &migrationInformation{
		seal: unwrapSeal,
	}
	if existBarrierSealConfig.Type != c.seal.BarrierType() {
		// It's unnecessary to call this when doing an auto->auto
		// same-seal-type migration, since they'll have the same configs before
		// and after migration.
		c.adjustSealConfigDuringMigration(existBarrierSealConfig, existRecoverySealConfig)
	}

	c.initSealsForMigration()
	c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType())

	return nil
}
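
// For reference, the "disabled seal in configuration" cases above correspond
// to a server config along these lines (illustrative HCL; the seal type and
// values are placeholders):
//
//	seal "awskms" {
//	    disabled   = "true"
//	    region     = "us-east-1"
//	    kms_key_id = "..."
//	}
//
// The old autoseal stays in the config marked disabled, while the new seal
// (or Shamir, by omission) is what Vault migrates to.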

func (c *Core) migrateSealConfig(ctx context.Context) error {
	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return fmt.Errorf("failed to read existing seal configuration during migration: %v", err)
	}

	var bc, rc *SealConfig

	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		// Migrating from auto->auto, copy the configs over
		bc, rc = existBarrierSealConfig, existRecoverySealConfig
	case c.migrationInfo.seal.RecoveryKeySupported():
		// Migrating from auto->shamir, clone auto's recovery config and set
		// stored keys to 1.
		bc = existRecoverySealConfig.Clone()
		bc.StoredShares = 1
	case c.seal.RecoveryKeySupported():
		// Migrating from shamir->auto, set a new barrier config and set
		// recovery config to a clone of shamir's barrier config with stored
		// keys set to 0.
		bc = &SealConfig{
			Type:            c.seal.BarrierType(),
			SecretShares:    1,
			SecretThreshold: 1,
			StoredShares:    1,
		}
		rc = existBarrierSealConfig.Clone()
		rc.StoredShares = 0
	}

	if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
		return errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
	}

	if c.seal.RecoveryKeySupported() {
		if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
			return errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
		}
	} else if err := c.physical.Delete(ctx, recoverySealConfigPlaintextPath); err != nil {
		return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err)
	}

	return nil
}

func (c *Core) adjustSealConfigDuringMigration(existBarrierSealConfig, existRecoverySealConfig *SealConfig) {
	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && existRecoverySealConfig != nil:
		// Migrating from auto->shamir, clone auto's recovery config and set
		// stored keys to 1. Unless the recovery config doesn't exist, in which
		// case the migration is assumed to have already been performed.
		newSealConfig := existRecoverySealConfig.Clone()
		newSealConfig.StoredShares = 1
		c.seal.SetCachedBarrierConfig(newSealConfig)
	case !c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		// Migrating from shamir->auto, set a new barrier config and set
		// recovery config to a clone of shamir's barrier config with stored
		// keys set to 0.
		newBarrierSealConfig := &SealConfig{
			Type:            c.seal.BarrierType(),
			SecretShares:    1,
			SecretThreshold: 1,
			StoredShares:    1,
		}
		c.seal.SetCachedBarrierConfig(newBarrierSealConfig)

		newRecoveryConfig := existBarrierSealConfig.Clone()
		newRecoveryConfig.StoredShares = 0
		c.seal.SetCachedRecoveryConfig(newRecoveryConfig)
	}
}

func (c *Core) unsealKeyToMasterKeyPostUnseal(ctx context.Context, combinedKey []byte) ([]byte, error) {
	return c.unsealKeyToMasterKey(ctx, c.seal, combinedKey, true, false)
}

func (c *Core) unsealKeyToMasterKeyPreUnseal(ctx context.Context, seal Seal, combinedKey []byte) ([]byte, error) {
	return c.unsealKeyToMasterKey(ctx, seal, combinedKey, false, true)
}

// unsealKeyToMasterKey takes a key provided by the user, either a recovery key
// if using an autoseal or an unseal key with Shamir. It returns a nil error
// if the key is valid and an error otherwise. It also returns the master key
// that can be used to unseal the barrier.
//
// If useTestSeal is true, seal will not be modified; this is used when not
// invoked as part of an unseal process. Otherwise in the non-legacy shamir
// case the combinedKey will be set in the seal, which means subsequent attempts
// to use the seal to read the master key will succeed, assuming combinedKey is
// valid.
//
// If allowMissing is true, a failure to find the master key in storage results
// in a nil error and a nil master key being returned.
func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey []byte, useTestSeal bool, allowMissing bool) ([]byte, error) {
	switch seal.StoredKeysSupported() {
	case vaultseal.StoredKeysSupportedGeneric:
		if err := seal.VerifyRecoveryKey(ctx, combinedKey); err != nil {
			return nil, errwrap.Wrapf("recovery key verification failed: {{err}}", err)
		}

		storedKeys, err := seal.GetStoredKeys(ctx)
		if storedKeys == nil && err == nil && allowMissing {
			return nil, nil
		}
		if err == nil && len(storedKeys) != 1 {
			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
		}
		if err != nil {
			return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
		}
		return storedKeys[0], nil

	case vaultseal.StoredKeysSupportedShamirMaster:
		if useTestSeal {
			testseal := NewDefaultSeal(&vaultseal.Access{
				Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
					Logger: c.logger.Named("testseal"),
				}),
			})
			testseal.SetCore(c)
			cfg, err := seal.BarrierConfig(ctx)
			if err != nil {
				return nil, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err)
			}
			testseal.SetCachedBarrierConfig(cfg)
			seal = testseal
		}

		err := seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(combinedKey)
		if err != nil {
			return nil, errwrap.Wrapf("failed to setup unseal key: {{err}}", err)
		}
		storedKeys, err := seal.GetStoredKeys(ctx)
		if storedKeys == nil && err == nil && allowMissing {
			return nil, nil
		}
		if err == nil && len(storedKeys) != 1 {
			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
		}
		if err != nil {
			return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
		}
		return storedKeys[0], nil

	case vaultseal.StoredKeysNotSupported:
		return combinedKey, nil
	}
	return nil, fmt.Errorf("invalid seal")
}
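
// Example (hypothetical sketch): validating a user-supplied key against a
// candidate seal before unsealing, via the pre-unseal wrapper above so the
// real seal is left untouched; candidateSeal and combinedKey are assumed to
// come from the unseal request. With allowMissing in effect, a nil master
// key with a nil error means the stored key simply wasn't found.
//
//	masterKey, err := c.unsealKeyToMasterKeyPreUnseal(ctx, candidateSeal, combinedKey)
//	if err != nil {
//		// invalid key or storage failure
//	}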

// IsInSealMigrationMode returns true if we're configured to perform a seal migration,
// meaning either that we have a disabled seal in HCL configuration or the seal
// configuration in storage is Shamir but the seal in HCL is not. In this
// mode we should not auto-unseal (even if the migration is done) and we will
// accept unseal requests with and without the `migrate` option, though the migrate
// option is required if we haven't yet performed the seal migration.
func (c *Core) IsInSealMigrationMode() bool {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return c.migrationInfo != nil
}

// IsSealMigrated returns true if we're in seal migration mode but migration
// has already been performed (possibly by another node, or prior to this node's
// current invocation).
func (c *Core) IsSealMigrated() bool {
	if !c.IsInSealMigrationMode() {
		return false
	}

	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	done, _ := c.sealMigrated(context.Background())
	return done
}

func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
	return NewBarrierEncryptorAccess(c.barrier)
}

func (c *Core) PhysicalAccess() *physical.PhysicalAccess {
	return physical.NewPhysicalAccess(c.physical)
}

func (c *Core) RouterAccess() *RouterAccess {
	return NewRouterAccess(c)
}

// IsDRSecondary returns whether the current cluster state is a DR secondary.
func (c *Core) IsDRSecondary() bool {
	return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
}

func (c *Core) IsPerfSecondary() bool {
	return c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary)
}

func (c *Core) AddLogger(logger log.Logger) {
	c.allLoggersLock.Lock()
	defer c.allLoggersLock.Unlock()
	c.allLoggers = append(c.allLoggers, logger)
}

func (c *Core) SetLogLevel(level log.Level) {
	c.allLoggersLock.RLock()
	defer c.allLoggersLock.RUnlock()
	for _, logger := range c.allLoggers {
		logger.SetLevel(level)
	}
}
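
// Example (hypothetical sketch): registering a subsystem logger so that a
// later SetLogLevel call reaches it along with every other registered
// logger; "mysubsystem" is a placeholder name.
//
//	sub := c.Logger().Named("mysubsystem")
//	c.AddLogger(sub)
//	c.SetLogLevel(log.Debug) // fans out to all registered loggers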

// SetConfig sets core's config object to the newly provided config.
func (c *Core) SetConfig(conf *server.Config) {
	c.rawConfig.Store(conf)
	bz, err := json.Marshal(c.SanitizedConfig())
	if err != nil {
		c.logger.Error("error serializing sanitized config", "error", err)
		return
	}
	c.logger.Debug("set config", "sanitized config", string(bz))
}

// SanitizedConfig returns a sanitized version of the current config.
// See server.Config.Sanitized for specific values omitted.
func (c *Core) SanitizedConfig() map[string]interface{} {
	conf := c.rawConfig.Load()
	if conf == nil {
		return nil
	}
	return conf.(*server.Config).Sanitized()
}

// LogFormat returns the log format currently in use.
func (c *Core) LogFormat() string {
	conf := c.rawConfig.Load()
	return conf.(*server.Config).LogFormat
}

// MetricsHelper returns the global metrics helper which allows external
// packages to access Vault's internal metrics.
func (c *Core) MetricsHelper() *metricsutil.MetricsHelper {
	return c.metricsHelper
}

// MetricSink returns the metrics wrapper with which Core has been configured.
func (c *Core) MetricSink() *metricsutil.ClusterMetricSink {
	return c.metricSink
}

// BuiltinRegistry is an interface that allows the "vault" package to use
// the registry of builtin plugins without getting an import cycle. It
// also allows for mocking the registry easily.
type BuiltinRegistry interface {
	Contains(name string, pluginType consts.PluginType) bool
	Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool)
	Keys(pluginType consts.PluginType) []string
}
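
// A minimal mock satisfying BuiltinRegistry, as the comment above suggests
// (hypothetical sketch for tests, not part of this file):
//
//	type emptyRegistry struct{}
//
//	func (emptyRegistry) Contains(name string, pluginType consts.PluginType) bool { return false }
//	func (emptyRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
//		return nil, false
//	}
//	func (emptyRegistry) Keys(pluginType consts.PluginType) []string { return nil }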

func (c *Core) AuditLogger() AuditLogger {
	return &basicAuditor{c: c}
}

type FeatureFlags struct {
	NamespacesCubbyholesLocal bool `json:"namespace_cubbyholes_local"`
}

func (c *Core) persistFeatureFlags(ctx context.Context) error {
	if !c.PR1103disabled {
		c.logger.Debug("persisting feature flags")
		json, err := jsonutil.EncodeJSON(&FeatureFlags{NamespacesCubbyholesLocal: !c.PR1103disabled})
		if err != nil {
			return err
		}
		return c.barrier.Put(ctx, &logical.StorageEntry{
			Key:   consts.CoreFeatureFlagPath,
			Value: json,
		})
	}
	return nil
}

func (c *Core) readFeatureFlags(ctx context.Context) (*FeatureFlags, error) {
	entry, err := c.barrier.Get(ctx, consts.CoreFeatureFlagPath)
	if err != nil {
		return nil, err
	}
	var flags FeatureFlags
	if entry != nil {
		err = jsonutil.DecodeJSON(entry.Value, &flags)
		if err != nil {
			return nil, err
		}
	}
	return &flags, nil
}
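
// The two functions above form a simple barrier-storage round trip: encode
// with jsonutil, write under consts.CoreFeatureFlagPath, decode the same
// path on read. A condensed caller sketch (hypothetical):
//
//	flags, err := c.readFeatureFlags(ctx)
//	if err == nil && flags.NamespacesCubbyholesLocal {
//		// feature flag was persisted as enabled
//	}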

// MatchingMount returns the path of the mount that will be responsible for
// handling the given request path.
func (c *Core) MatchingMount(ctx context.Context, reqPath string) string {
	return c.router.MatchingMount(ctx, reqPath)
}

func (c *Core) setupQuotas(ctx context.Context, isPerfStandby bool) error {
	if c.quotaManager == nil {
		return nil
	}
	return c.quotaManager.Setup(ctx, c.systemBarrierView, isPerfStandby)
}

// ApplyRateLimitQuota checks the request against all the applicable quota rules.
// If the given request's path is exempt, no rate limiting will be applied.
func (c *Core) ApplyRateLimitQuota(req *quotas.Request) (quotas.Response, error) {
	req.Type = quotas.TypeRateLimit

	resp := quotas.Response{
		Allowed: true,
		Headers: make(map[string]string),
	}

	if c.quotaManager != nil {
		// Skip rate limit checks for paths that are exempt from rate limiting.
		if c.quotaManager.RateLimitPathExempt(req.Path) {
			return resp, nil
		}
		return c.quotaManager.ApplyQuota(req)
	}

	return resp, nil
}
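
// Example (hypothetical sketch): an HTTP handler consulting the rate limit
// quota before doing any work; quotas.Request fields other than Path are
// elided, and Type is set by ApplyRateLimitQuota itself.
//
//	resp, err := c.ApplyRateLimitQuota(&quotas.Request{Path: req.URL.Path})
//	if err != nil {
//		// treat as an internal error
//	}
//	if !resp.Allowed {
//		// reject with HTTP 429, optionally emitting resp.Headers
//	}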

// RateLimitAuditLoggingEnabled returns whether the quota configuration allows
// audit logging of request rejections due to rate limiting quota rule
// violations.
func (c *Core) RateLimitAuditLoggingEnabled() bool {
	if c.quotaManager != nil {
		return c.quotaManager.RateLimitAuditLoggingEnabled()
	}
	return false
}

// RateLimitResponseHeadersEnabled returns whether the quota configuration
// allows rate limit quota HTTP headers to be added to responses.
func (c *Core) RateLimitResponseHeadersEnabled() bool {
	if c.quotaManager != nil {
		return c.quotaManager.RateLimitResponseHeadersEnabled()
	}
	return false
}

func (c *Core) KeyRotateGracePeriod() time.Duration {
	return time.Duration(atomic.LoadInt64(c.keyRotateGracePeriod))
}

func (c *Core) SetKeyRotateGracePeriod(t time.Duration) {
	atomic.StoreInt64(c.keyRotateGracePeriod, int64(t))
}

// autoRotateBarrierLoop periodically tests whether the barrier key should be
// automatically rotated.
func (c *Core) autoRotateBarrierLoop(ctx context.Context) {
	t := time.NewTicker(autoRotateCheckInterval)
	for {
		select {
		case <-t.C:
			c.checkBarrierAutoRotate(ctx)
		case <-ctx.Done():
			t.Stop()
			return
		}
	}
}

func (c *Core) checkBarrierAutoRotate(ctx context.Context) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.isPrimary() {
		reason, err := c.barrier.CheckBarrierAutoRotate(ctx)
		if err != nil {
			lf := c.logger.Error
			if strings.HasSuffix(err.Error(), "context canceled") {
				lf = c.logger.Debug
			}
			lf("error in barrier auto rotation", "error", err)
			return
		}
		if reason != "" {
			// Time to rotate. Invoke the rotation handler in order to both
			// rotate and create the replication canary.
			c.logger.Info("automatic barrier key rotation triggered", "reason", reason)
			_, err := c.systemBackend.handleRotate(ctx, nil, nil)
			if err != nil {
				c.logger.Error("error automatically rotating barrier key", "error", err)
			} else {
				metrics.IncrCounter(barrierRotationsMetric, 1)
			}
		}
	}
}

func (c *Core) isPrimary() bool {
	return !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary)
}

func ParseRequiredState(raw string, hmacKey []byte) (*logical.WALState, error) {
	cooked, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		return nil, err
	}
	s := string(cooked)

	lastIndex := strings.LastIndexByte(s, ':')
	if lastIndex == -1 {
		return nil, fmt.Errorf("invalid state header format")
	}
	state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:]
	stateHMAC, err := hex.DecodeString(stateHMACRaw)
	if err != nil {
		return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err)
	}

	if len(hmacKey) != 0 {
		hm := hmac.New(sha256.New, hmacKey)
		hm.Write([]byte(state))
		if !hmac.Equal(hm.Sum(nil), stateHMAC) {
			return nil, fmt.Errorf("invalid state header HMAC (mismatch)")
		}
	}

	pieces := strings.Split(state, ":")
	if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" {
		return nil, fmt.Errorf("invalid state header format")
	}
	localIndex, err := strconv.ParseUint(pieces[2], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid state header format")
	}
	replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid state header format")
	}

	return &logical.WALState{
		ClusterID:       pieces[1],
		LocalIndex:      localIndex,
		ReplicatedIndex: replicatedIndex,
	}, nil
}
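
// For reference, the inverse of ParseRequiredState: a producer builds
// "v1:<clusterID>:<localIndex>:<replicatedIndex>", appends a hex-encoded
// HMAC-SHA256 of that string after a final ':', and base64-encodes the
// result. A hedged sketch (hypothetical helper, not part of this file):
//
//	func encodeRequiredState(ws *logical.WALState, hmacKey []byte) string {
//		state := fmt.Sprintf("v1:%s:%d:%d", ws.ClusterID, ws.LocalIndex, ws.ReplicatedIndex)
//		hm := hmac.New(sha256.New, hmacKey)
//		hm.Write([]byte(state))
//		return base64.StdEncoding.EncodeToString([]byte(state + ":" + hex.EncodeToString(hm.Sum(nil))))
//	}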