// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package vault

import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/subtle"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/errwrap"
	log "github.com/hashicorp/go-hclog"
	wrapping "github.com/hashicorp/go-kms-wrapping/v2"
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
	"github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-secure-stdlib/mlock"
	"github.com/hashicorp/go-secure-stdlib/reloadutil"
	"github.com/hashicorp/go-secure-stdlib/strutil"
	"github.com/hashicorp/go-secure-stdlib/tlsutil"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/helper/experiments"
	"github.com/hashicorp/vault/helper/identity/mfa"
	"github.com/hashicorp/vault/helper/locking"
	"github.com/hashicorp/vault/helper/metricsutil"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/helper/osutil"
	"github.com/hashicorp/vault/physical/raft"
	"github.com/hashicorp/vault/sdk/helper/certutil"
	"github.com/hashicorp/vault/sdk/helper/consts"
	"github.com/hashicorp/vault/sdk/helper/jsonutil"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/helper/pathmanager"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical"
	sr "github.com/hashicorp/vault/serviceregistration"
	"github.com/hashicorp/vault/shamir"
	"github.com/hashicorp/vault/vault/cluster"
	"github.com/hashicorp/vault/vault/eventbus"
	"github.com/hashicorp/vault/vault/quotas"
	vaultseal "github.com/hashicorp/vault/vault/seal"
	"github.com/hashicorp/vault/version"
	"github.com/patrickmn/go-cache"
	uberAtomic "go.uber.org/atomic"
	"google.golang.org/grpc"
)

const (
	// CoreLockPath is the path used to acquire a coordinating lock
	// for a highly-available deploy.
	CoreLockPath = "core/lock"

	// The poison pill is used as a check during certain scenarios to indicate
	// to standby nodes that they should seal
	poisonPillPath   = "core/poison-pill"
	poisonPillDRPath = "core/poison-pill-dr"

	// coreLeaderPrefix is the prefix used for the UUID that contains
	// the currently elected leader.
	coreLeaderPrefix = "core/leader/"

	// coreKeyringCanaryPath is used as a canary to indicate to replicated
	// clusters that they need to perform a rekey operation synchronously; this
	// isn't keyring-canary to avoid ignoring it when ignoring core/keyring
	coreKeyringCanaryPath = "core/canary-keyring"

	// coreGroupPolicyApplicationPath is used to store the behaviour for
	// how policies should be applied
	coreGroupPolicyApplicationPath = "core/group-policy-application-mode"

	// groupPolicyApplicationModeWithinNamespaceHierarchy is a configuration option for group
	// policy application modes, which allows only in-namespace-hierarchy policy application
	groupPolicyApplicationModeWithinNamespaceHierarchy = "within_namespace_hierarchy"

	// groupPolicyApplicationModeAny is a configuration option for group
	// policy application modes, which allows policy application irrespective of namespaces
	groupPolicyApplicationModeAny = "any"

	indexHeaderHMACKeyPath = "core/index-header-hmac-key"

	// defaultMFAAuthResponseTTL is the default duration that Vault caches the
	// MfaAuthResponse when the value is not specified in the server config
	defaultMFAAuthResponseTTL = 300 * time.Second

	// defaultMaxTOTPValidateAttempts is the default value for the number
	// of failed attempts to validate a request subject to TOTP MFA. If the
	// number of failed TOTP passcode validations exceeds this max value, the
	// user needs to wait until a fresh TOTP passcode is generated.
	defaultMaxTOTPValidateAttempts = 5

	// ForwardSSCTokenToActive is the value that must be set in the
	// forwardToActive to trigger forwarding if a perf standby encounters
	// an SSC Token that it does not have the WAL state for.
	ForwardSSCTokenToActive = "new_token"

	WrapperTypeHsmAutoDeprecated = wrapping.WrapperType("hsm-auto")

	// undoLogsAreSafeStoragePath is a storage path that we write once we know undo logs are
	// safe, so we don't have to keep checking all the time.
	undoLogsAreSafeStoragePath = "core/raft/undo_logs_are_safe"

	// ErrMlockFailedTemplate is the error message emitted when the mlock
	// syscall fails at startup.
	ErrMlockFailedTemplate = "Failed to lock memory: %v\n\n" +
		"This usually means that the mlock syscall is not available.\n" +
		"Vault uses mlock to prevent memory from being swapped to\n" +
		"disk. This requires root privileges as well as a machine\n" +
		"that supports mlock. Please enable mlock on your system or\n" +
		"disable Vault from using it. To disable Vault from using it,\n" +
		"set the `disable_mlock` configuration option in your configuration\n" +
		"file."
)
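
// For reference, the escape hatch that ErrMlockFailedTemplate points users at
// is the disable_mlock option in the server configuration file. A minimal HCL
// sketch (the storage stanza here is a placeholder, not a recommendation):
//
//	disable_mlock = true
//
//	storage "file" {
//		path = "/opt/vault/data"
//	}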

var (
	// ErrAlreadyInit is returned if the core is already
	// initialized. This prevents a re-initialization.
	ErrAlreadyInit = errors.New("Vault is already initialized")

	// ErrNotInit is returned if an attempt is made to unseal a
	// barrier that has not been initialized.
	ErrNotInit = errors.New("Vault is not initialized")

	// ErrInternalError is returned when we don't want to leak
	// any information about an internal error
	ErrInternalError = errors.New("internal error")

	// ErrHANotEnabled is returned if the operation only makes sense
	// in an HA setting
	ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")

	// ErrIntrospectionNotEnabled is returned if "introspection_endpoint" is not
	// enabled in the configuration file
	ErrIntrospectionNotEnabled = errors.New("The Vault configuration must set \"introspection_endpoint\" to true to enable this endpoint")

	// manualStepDownSleepPeriod is how long to sleep after a user-initiated
	// step down of the active node, to prevent instantly regrabbing the lock.
	// It's var not const so that tests can manipulate it.
	manualStepDownSleepPeriod = 10 * time.Second

	// Functions only in the Enterprise version
	enterprisePostUnseal         = enterprisePostUnsealImpl
	enterprisePreSeal            = enterprisePreSealImpl
	enterpriseSetupFilteredPaths = enterpriseSetupFilteredPathsImpl
	enterpriseSetupQuotas        = enterpriseSetupQuotasImpl
	enterpriseSetupAPILock       = setupAPILockImpl
	startReplication             = startReplicationImpl
	stopReplication              = stopReplicationImpl
	LastWAL                      = lastWALImpl
	LastPerformanceWAL           = lastPerformanceWALImpl
	LastDRWAL                    = lastDRWALImpl
	PerformanceMerkleRoot        = merkleRootImpl
	DRMerkleRoot                 = merkleRootImpl
	LastRemoteWAL                = lastRemoteWALImpl
	LastRemoteUpstreamWAL        = lastRemoteUpstreamWALImpl
	WaitUntilWALShipped          = waitUntilWALShippedImpl

	storedLicenseCheck = func(c *Core, conf *CoreConfig) error { return nil }
	LicenseAutoloaded  = func(*Core) bool { return false }
	LicenseInitCheck   = func(*Core) error { return nil }
	LicenseSummary     = func(*Core) (*LicenseState, error) { return nil, nil }
	LicenseReload      = func(*Core) error { return nil }
)

// NonFatalError is an error that can be returned during NewCore that should be
// displayed but not cause a program exit
type NonFatalError struct {
	Err error
}

func (e *NonFatalError) WrappedErrors() []error {
	return []error{e.Err}
}

func (e *NonFatalError) Error() string {
	return e.Err.Error()
}

// NewNonFatalError returns a new non-fatal error.
func NewNonFatalError(err error) *NonFatalError {
	return &NonFatalError{Err: err}
}

// IsFatalError returns true if the given error is a fatal error.
func IsFatalError(err error) bool {
	return !errwrap.ContainsType(err, new(NonFatalError))
}
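
// Usage sketch (illustrative, not part of Core's API): NewCore may wrap a
// *NonFatalError into the error it returns, and a hypothetical caller can use
// IsFatalError to decide whether startup must abort:
//
//	core, err := NewCore(config)
//	if err != nil && IsFatalError(err) {
//		return nil, err // unrecoverable; stop startup
//	}
//	// any remaining err is non-fatal: report it and keep going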

// ErrInvalidKey is returned if there is a user-based error with a provided
// unseal key. This will be shown to the user, so should not contain
// information that is sensitive.
type ErrInvalidKey struct {
	Reason string
}

func (e *ErrInvalidKey) Error() string {
	return fmt.Sprintf("invalid key: %v", e.Reason)
}

type RegisterAuthFunc func(context.Context, time.Duration, string, *logical.Auth, string) error

type activeAdvertisement struct {
	RedirectAddr     string                     `json:"redirect_addr"`
	ClusterAddr      string                     `json:"cluster_addr,omitempty"`
	ClusterCert      []byte                     `json:"cluster_cert,omitempty"`
	ClusterKeyParams *certutil.ClusterKeyParams `json:"cluster_key_params,omitempty"`
}

type unlockInformation struct {
	Parts [][]byte
	Nonce string
}

type raftInformation struct {
	challenge           *wrapping.BlobInfo
	leaderClient        *api.Client
	leaderBarrierConfig *SealConfig
	nonVoter            bool
	joinInProgress      bool
}

type migrationInformation struct {
	// seal to use during a migration operation. It is the
	// seal we're migrating *from*.
	seal Seal

	// unsealKey was the unseal key provided for the migration seal.
	// This will be set as the recovery key when migrating from shamir to auto-seal.
	// We don't need to do anything with it when migrating auto->shamir because
	// we don't store the shamir combined key for shamir seals, nor when
	// migrating auto->auto because then the recovery key doesn't change.
	unsealKey []byte
}

// Core is used as the central manager of Vault activity. It is the primary point of
// interface for API handlers and is responsible for managing the logical and physical
// backends, router, security barrier, and audit trails.
type Core struct {
	entCore

	// The registry of builtin plugins is passed in here as an interface because
	// if it's used directly, it results in import cycles.
	builtinRegistry BuiltinRegistry

	// N.B.: This is used to populate a dev token down replication, as
	// otherwise, after replication is started, a dev would have to go through
	// the generate-root process simply to talk to the new follower cluster.
	devToken string

	// HABackend may be available depending on the physical backend
	ha physical.HABackend

	// storageType is the storage type set in the storage configuration
	storageType string

	// redirectAddr is the address we advertise as leader if held
	redirectAddr string

	// clusterAddr is the address we use for clustering
	clusterAddr *atomic.Value

	// physical backend is the un-trusted backend with durable data
	physical physical.Backend

	// serviceRegistration is the ServiceRegistration network
	serviceRegistration sr.ServiceRegistration

	// hcpLinkStatus is a string describing the status of HCP link connection
	hcpLinkStatus HCPLinkStatus

	// underlyingPhysical will always point to the underlying backend
	// implementation. This is an un-trusted backend with durable data
	underlyingPhysical physical.Backend

	// seal is our seal, for seal configuration information
	seal Seal

	// raftJoinDoneCh is used by the raft retry join routine to inform the unseal
	// process that the join is complete
	raftJoinDoneCh chan struct{}

	// postUnsealStarted informs the raft retry join routine that unseal key
	// validation is completed and post unseal has started so that it can complete
	// the join process when Shamir seal is in use
	postUnsealStarted *uint32

	// raftInfo will contain information required for this node to join as a
	// peer to an existing raft cluster. This is marked atomic to prevent data
	// races and is cast to raftInformation wherever it is used.
	raftInfo *atomic.Value

	// migrationInfo is used during (and possibly after) a seal migration.
	// This contains information about the seal we are migrating *from*. Even
	// post seal migration, provided the old seal is still in configuration,
	// migrationInfo will be populated, which on enterprise may be necessary for
	// seal rewrap.
	migrationInfo     *migrationInformation
	sealMigrationDone *uint32

	// barrier is the security barrier wrapping the physical backend
	barrier SecurityBarrier

	// router is responsible for managing the mount points for logical backends.
	router *Router

	// logicalBackends is the mapping of backends to use for this core
	logicalBackends map[string]logical.Factory

	// credentialBackends is the mapping of backends to use for this core
	credentialBackends map[string]logical.Factory

	// auditBackends is the mapping of backends to use for this core
	auditBackends map[string]audit.Factory

	// stateLock protects mutable state
	stateLock locking.RWMutex
	sealed    *uint32

	standby              bool
	perfStandby          bool
	standbyDoneCh        chan struct{}
	standbyStopCh        *atomic.Value
	manualStepDownCh     chan struct{}
	keepHALockOnStepDown *uint32
	heldHALock           physical.Lock

	// shutdownDoneCh is used to notify when core.Shutdown() completes.
	// core.Shutdown() is typically issued in a goroutine to allow Vault to
	// release the stateLock. This channel is marked atomic to prevent race
	// conditions.
	shutdownDoneCh *atomic.Value

	// unlockInfo has the keys provided to Unseal until the threshold number of
	// parts is available, as well as the operation nonce
	unlockInfo *unlockInformation

	// generateRootProgress holds the shares until we reach enough
	// to verify the master key
	generateRootConfig   *GenerateRootConfig
	generateRootProgress [][]byte
	generateRootLock     sync.Mutex

	// These variables hold the config and shares we have until we reach
	// enough to verify the appropriate master key. Note that the same lock is
	// used; this isn't time-critical so this shouldn't be a problem.
	barrierRekeyConfig  *SealConfig
	recoveryRekeyConfig *SealConfig
	rekeyLock           sync.RWMutex

	// mounts is loaded after unseal since it is a protected
	// configuration
	mounts *MountTable

	// mountsLock is used to ensure that the mounts table does not
	// change underneath a calling function
	mountsLock locking.DeadlockRWMutex

	// mountMigrationTracker tracks past and ongoing remount operations
	// against their migration ids
	mountMigrationTracker *sync.Map

	// auth is loaded after unseal since it is a protected
	// configuration
	auth *MountTable

	// authLock is used to ensure that the auth table does not
	// change underneath a calling function
	authLock locking.DeadlockRWMutex

	// audit is loaded after unseal since it is a protected
	// configuration
	audit *MountTable

	// auditLock is used to ensure that the audit table does not
	// change underneath a calling function
	auditLock sync.RWMutex

	// auditBroker is used to ingest the audit events and fan
	// out into the configured audit backends
	auditBroker *AuditBroker

	// auditedHeaders is used to configure which http headers
	// can be output in the audit logs
	auditedHeaders *AuditedHeadersConfig

	// systemBackend is the backend which is used to manage internal operations
	systemBackend   *SystemBackend
	loginMFABackend *LoginMFABackend

	// cubbyholeBackend is the backend which manages the per-token storage
	cubbyholeBackend *CubbyholeBackend

	// systemBarrierView is the barrier view for the system backend
	systemBarrierView *BarrierView

	// expiration manager is used for managing LeaseIDs,
	// renewal, expiration and revocation
	expiration *ExpirationManager

	// rollback manager is used to run rollbacks periodically
	rollback *RollbackManager

	// policy store is used to manage named ACL policies
	policyStore *PolicyStore

	// token store is used to manage authentication tokens
	tokenStore *TokenStore

	// identityStore is used to manage client entities
	identityStore *IdentityStore

	// activityLog is used to track active client count
	activityLog *ActivityLog

	// activityLogLock protects the activityLog and activityLogConfig
	activityLogLock sync.RWMutex

	// metricsCh is used to stop the metrics streaming
	metricsCh chan struct{}

	// metricsMutex is used to prevent a race condition between
	// metrics emission and sealing leading to a nil pointer
	metricsMutex sync.Mutex

	// inFlightReqData is used to store info about in-flight requests
	inFlightReqData *InFlightRequests

	// mfaResponseAuthQueue is used to cache the auth response per request ID
	mfaResponseAuthQueue     *LoginMFAPriorityQueue
	mfaResponseAuthQueueLock sync.Mutex

	// metricSink is the destination for all metrics that have
	// a cluster label.
	metricSink *metricsutil.ClusterMetricSink

	defaultLeaseTTL time.Duration
	maxLeaseTTL     time.Duration

	// baseLogger is used to avoid ResetNamed as it strips useful prefixes in
	// e.g. testing
	baseLogger log.Logger
	logger     log.Logger

	// logLevel is the log level provided by config, CLI flag, or env
	logLevel string

	// sentinelTraceDisabled disables the trace display for Sentinel checks
	sentinelTraceDisabled bool

	// cachingDisabled indicates whether caches are disabled
	cachingDisabled bool

	// physicalCache stores the actual cache; we always have this but may
	// bypass it if disabled
	physicalCache physical.ToggleablePurgemonster

	// logRequestsLevel indicates at which level requests should be logged
	logRequestsLevel *uberAtomic.Int32

	// reloadFuncs is a map containing reload functions
	reloadFuncs map[string][]reloadutil.ReloadFunc

	// reloadFuncsLock controls access to the funcs
	reloadFuncsLock sync.RWMutex

	// wrappingJWTKey is the key used for generating JWTs containing response
	// wrapping information
	wrappingJWTKey *ecdsa.PrivateKey

	//
	// Cluster information
	//

	// Name
	clusterName string
	// ID
	clusterID uberAtomic.String
	// Specific cipher suites to use for clustering, if any
	clusterCipherSuites []uint16
	// Used to modify cluster parameters
	clusterParamsLock sync.RWMutex
	// The private key stored in the barrier used for establishing
	// mutually-authenticated connections between Vault cluster members
	localClusterPrivateKey *atomic.Value
	// The local cluster cert
	localClusterCert *atomic.Value
	// The parsed form of the local cluster cert
	localClusterParsedCert *atomic.Value
	// The TCP addresses we should use for clustering
	clusterListenerAddrs []*net.TCPAddr
	// The handler to use for request forwarding
	clusterHandler http.Handler
	// Write lock used to ensure that we don't have multiple connections adjust
	// this value at the same time
	requestForwardingConnectionLock sync.RWMutex
	// Lock for the leader values, ensuring we don't run the parts of Leader()
	// that change things concurrently
	leaderParamsLock sync.RWMutex
	// Current cluster leader values
	clusterLeaderParams *atomic.Value
	// Info on cluster members
	clusterPeerClusterAddrsCache *cache.Cache
	// The context for the client
	rpcClientConnContext context.Context
	// The function for canceling the client connection
	rpcClientConnCancelFunc context.CancelFunc
	// The grpc ClientConn for RPC calls
	rpcClientConn *grpc.ClientConn
	// The grpc forwarding client
	rpcForwardingClient *forwardingClient
	// The UUID used to hold the leader lock. Only set on active node
	leaderUUID string

	// CORS Information
	corsConfig *CORSConfig

	// replicationState keeps the current replication state cached for quick
	// lookup; activeNodeReplicationState stores the active value on standbys
	replicationState           *uint32
	activeNodeReplicationState *uint32

	// uiConfig contains UI configuration
	uiConfig *UIConfig

	// rawEnabled indicates whether the Raw endpoint is enabled
	rawEnabled bool

	// introspectionEnabled indicates whether the introspection endpoint is enabled
	introspectionEnabled     bool
	introspectionEnabledLock sync.Mutex

	// pluginDirectory is the location vault will look for plugin binaries
	pluginDirectory string

	// pluginFileUid is the uid of the plugin files and directory
	pluginFileUid int

	// pluginFilePermissions is the permissions of the plugin files and directory
	pluginFilePermissions int

	// pluginCatalog is used to manage plugin configurations
	pluginCatalog *PluginCatalog

	// userFailedLoginInfo holds user failed login information, keyed by user
	// information (alias name and mount accessor), with the login counter and
	// last failed login time as the value
	userFailedLoginInfo map[FailedLoginUser]*FailedLoginInfo

	// userFailedLoginInfoLock controls access to the userFailedLoginInfo map
	userFailedLoginInfoLock sync.RWMutex

	enableMlock bool

	// activeContext can be used to trigger operations to stop running when
	// Vault is going to be shut down, stepped down, or sealed
	activeContext           context.Context
	activeContextCancelFunc *atomic.Value

	// sealUnwrapper stores the seal unwrapper for downgrade needs
	sealUnwrapper physical.Backend

	// unsealWithStoredKeysLock is a mutex that prevents multiple processes from
	// unsealing with stored keys at the same time.
	unsealWithStoredKeysLock sync.Mutex

	// postUnsealFuncs stores any funcs that should be run on successful postUnseal
	postUnsealFuncs []func()

	// postRecoveryUnsealFuncs stores any funcs that should be run on successful
	// barrier unseal in recovery mode
	postRecoveryUnsealFuncs []func() error

	// replicationFailure is used to mark when replication has entered an
	// unrecoverable failure.
	replicationFailure *uint32

	// disablePerfStandby is used to tell a standby not to attempt to become a
	// perf standby
	disablePerfStandby bool

	licensingStopCh chan struct{}

	// allLoggers stores loggers so we can reset the level
	allLoggers     []log.Logger
	allLoggersLock sync.RWMutex

	// neverBecomeActive can be toggled atomically to cause the core to never
	// try to become active, or give up active as soon as it gets it
	neverBecomeActive *uint32

	// clusterListener starts up and manages connections on the cluster ports
	clusterListener *atomic.Value

	// customListenerHeader holds custom response headers for a listener
	customListenerHeader *atomic.Value

	// Telemetry objects
	metricsHelper *metricsutil.MetricsHelper

	// raftFollowerStates tracks information about all the raft follower nodes.
	raftFollowerStates *raft.FollowerStates

	// raftTLSRotationStopCh is the stop channel for raft TLS rotations
	raftTLSRotationStopCh chan struct{}

	// pendingRaftPeers stores the pending peers we are waiting to give answers
	pendingRaftPeers *sync.Map

	// rawConfig stores the config as-is from the provided server configuration.
	rawConfig *atomic.Value

	coreNumber int

	// secureRandomReader is the reader used for CSP operations
	secureRandomReader io.Reader

	recoveryMode bool

	clusterNetworkLayer cluster.NetworkLayer

	// PR1103disabled is used to test upgrade workflows: when set to true,
	// the correct behaviour for namespaced cubbyholes is disabled, so we
	// can test an upgrade to a version that includes the fixes from
	// https://github.com/hashicorp/vault-enterprise/pull/1103
	PR1103disabled bool

	quotaManager *quotas.Manager

	clusterHeartbeatInterval time.Duration

	// activityLogConfig contains override values for the activity log;
	// it is protected by activityLogLock
	activityLogConfig ActivityLogCoreConfig

	censusConfig atomic.Value

	// activeTime is set on active nodes indicating the time at which this node
	// became active.
	activeTime time.Time

	// keyRotateGracePeriod is how long we allow an upgrade path
	// for standby instances before we delete the upgrade keys
	keyRotateGracePeriod *int64

	autoRotateCancel              context.CancelFunc
	updateLockedUserEntriesCancel context.CancelFunc

	// number of workers to use for lease revocation in the expiration manager
	numExpirationWorkers int

	IndexHeaderHMACKey uberAtomic.Value

	// disableAutopilot is used to disable the autopilot subsystem in raft storage
	disableAutopilot bool

	// enable/disable identifying response headers
	enableResponseHeaderHostname   bool
	enableResponseHeaderRaftNodeID bool

	// disableSSCTokens is used to disable server side consistent token creation/usage
	disableSSCTokens bool

	// versionHistory is a map of vault versions to VaultVersion. The
	// VaultVersion.TimestampInstalled denotes when the version was first run.
	// Note that because perf standbys should be upgraded first, and
	// only the active node will actually write the new version timestamp, a perf
	// standby shouldn't rely on the stored version timestamps being present.
	versionHistory map[string]VaultVersion

	// effectiveSDKVersion contains the SDK version that standby nodes should use when
	// heartbeating with the active node. Defaults to the current SDK version.
	effectiveSDKVersion string

	numRollbackWorkers int
	rollbackPeriod     time.Duration

	experiments []string

	pendingRemovalMountsAllowed bool

	expirationRevokeRetryBase time.Duration

	events *eventbus.EventBus

	// writeForwardedPaths are a set of storage paths which are GRPC forwarded
	// to the active node of the primary cluster, when present. This PathManager
	// contains absolute paths that we intend to forward (and template) when
	// we're on a secondary cluster.
	writeForwardedPaths *pathmanager.PathManager

	// requestResponseCallback, if populated, is called for every request,
	// for testing purposes
	requestResponseCallback func(logical.Backend, *logical.Request, *logical.Response)

	// impreciseLeaseRoleTracking: if any role based quota (LCQ or RLQ) is
	// enabled, don't track lease counts by role
	impreciseLeaseRoleTracking bool

	// detectDeadlocks holds the config value for "detect_deadlocks".
	detectDeadlocks []string
}

// HAState returns the HA role of this node: active, standby, or performance
// standby. c.stateLock needs to be held in read mode before calling this
// function.
func (c *Core) HAState() consts.HAState {
	switch {
	case c.perfStandby:
		return consts.PerfStandby
	case c.standby:
		return consts.Standby
	default:
		return consts.Active
	}
}

// HAStateWithLock acquires the state lock in read mode before returning the
// HA state; use it when the caller does not already hold the lock.
func (c *Core) HAStateWithLock() consts.HAState {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	return c.HAState()
}
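
// Hedged usage sketch: code that already holds c.stateLock in read mode calls
// HAState directly; any other caller should use HAStateWithLock to avoid
// reading the standby/perfStandby fields without the lock:
//
//	if c.HAStateWithLock() == consts.Active {
//		// safe to assume this node is currently the active node
//	}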

// CoreConfig is used to parameterize a core
type CoreConfig struct {
	entCoreConfig

	DevToken string

	BuiltinRegistry BuiltinRegistry

	LogicalBackends map[string]logical.Factory

	CredentialBackends map[string]logical.Factory

	AuditBackends map[string]audit.Factory

	Physical physical.Backend

	StorageType string

	// May be nil, which disables HA operations
	HAPhysical physical.HABackend

	ServiceRegistration sr.ServiceRegistration

	// Seal is the configured seal, or if none is configured explicitly, a
	// shamir seal. In migration scenarios this is the new seal.
	Seal Seal

	// UnwrapSeal is the optional seal marked "disabled"; this is the old
	// seal in migration scenarios.
	UnwrapSeal Seal

	SecureRandomReader io.Reader

	LogLevel string

	Logger log.Logger

	// Use the deadlocks library to detect deadlocks
	DetectDeadlocks string

	// If any role based quota (LCQ or RLQ) is enabled, don't track lease counts by role
	ImpreciseLeaseRoleTracking bool

	// Disables the trace display for Sentinel checks
	DisableSentinelTrace bool

	// Disables the LRU cache on the physical backend
	DisableCache bool

	// Disables the mlock syscall
	DisableMlock bool

	// Custom cache size for the LRU cache on the physical backend, or zero for default
	CacheSize int

	// Set as the leader address for HA
	RedirectAddr string

	// Set as the cluster address for HA
	ClusterAddr string

	DefaultLeaseTTL time.Duration

	MaxLeaseTTL time.Duration

	ClusterName string

	ClusterCipherSuites string

	EnableUI bool

	// Enable the raw endpoint
	EnableRaw bool

	// Enable the introspection endpoint
	EnableIntrospection bool

	PluginDirectory string

	PluginFileUid int

	PluginFilePermissions int

	DisableSealWrap bool

	RawConfig *server.Config

	ReloadFuncs     *map[string][]reloadutil.ReloadFunc
	ReloadFuncsLock *sync.RWMutex

	// Licensing
	License         string
	LicensePath     string
	LicensingConfig *LicensingConfig

	// Configured Census Agent
	CensusAgent CensusReporter

	DisablePerformanceStandby bool
	DisableIndexing           bool
	DisableKeyEncodingChecks  bool

	AllLoggers []log.Logger

	// Telemetry objects
	MetricsHelper *metricsutil.MetricsHelper
	MetricSink    *metricsutil.ClusterMetricSink

	RecoveryMode bool

	ClusterNetworkLayer cluster.NetworkLayer

	ClusterHeartbeatInterval time.Duration

	// Activity log controls
	ActivityLogConfig ActivityLogCoreConfig

	// number of workers to use for lease revocation in the expiration manager
	NumExpirationWorkers int

	// DisableAutopilot is used to disable the autopilot subsystem in raft storage
	DisableAutopilot bool

	// Whether to send headers in the HTTP response showing hostname or raft node ID
	EnableResponseHeaderHostname   bool
	EnableResponseHeaderRaftNodeID bool

	// DisableSSCTokens is used to disable the use of server side consistent tokens
	DisableSSCTokens bool

	EffectiveSDKVersion string

	RollbackPeriod time.Duration

	Experiments []string

	PendingRemovalMountsAllowed bool

	ExpirationRevokeRetryBase time.Duration

	// AdministrativeNamespacePath is used to configure the administrative namespace, which has access to some sys endpoints that are
	// only accessible in the root namespace, currently sys/audit-hash and sys/monitor.
	AdministrativeNamespacePath string

	NumRollbackWorkers int
}
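
// A minimal CoreConfig sketch (illustrative; the backend and logger values are
// assumptions supplied by the caller, and fields left zero fall back to the
// defaults applied in CreateCore):
//
//	conf := &CoreConfig{
//		Physical:     backend, // some physical.Backend
//		Seal:         nil,     // nil means a shamir seal is used
//		Logger:       logger,
//		DisableMlock: true,
//	}
//	core, err := CreateCore(conf)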

// GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
// not exist.
func (c *CoreConfig) GetServiceRegistration() sr.ServiceRegistration {
	// Check whether there is a ServiceRegistration explicitly configured
	if c.ServiceRegistration != nil {
		return c.ServiceRegistration
	}

	// Check if HAPhysical is configured and implements ServiceRegistration
	if c.HAPhysical != nil && c.HAPhysical.HAEnabled() {
		if disc, ok := c.HAPhysical.(sr.ServiceRegistration); ok {
			return disc
		}
	}

	// No service discovery is available.
	return nil
}
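
// Hedged usage sketch: if an HA backend (Consul, for example) also implements
// sr.ServiceRegistration, it is picked up implicitly when no explicit
// registration was configured:
//
//	if sreg := conf.GetServiceRegistration(); sreg != nil {
//		// hand sreg to whatever runner notifies it of seal/active changes
//	}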

// CreateCore conducts static validations on the Core Config
// and returns an uninitialized core.
func CreateCore(conf *CoreConfig) (*Core, error) {
	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		if conf.RedirectAddr == "" {
			return nil, fmt.Errorf("missing API address, please set in configuration or via environment")
		}
	}

	if conf.DefaultLeaseTTL == 0 {
		conf.DefaultLeaseTTL = defaultLeaseTTL
	}
	if conf.MaxLeaseTTL == 0 {
		conf.MaxLeaseTTL = maxLeaseTTL
	}
	if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
		return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
	}

	// Validate the advertise addr if it's given to us
	if conf.RedirectAddr != "" {
		u, err := url.Parse(conf.RedirectAddr)
		if err != nil {
			return nil, fmt.Errorf("redirect address is not valid url: %w", err)
		}

		if u.Scheme == "" {
			return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
		}
	}

	// Make a default logger if not provided
	if conf.Logger == nil {
		conf.Logger = logging.NewVaultLogger(log.Trace)
	}

	// Make a default metric sink if not provided
	if conf.MetricSink == nil {
		conf.MetricSink = metricsutil.BlackholeSink()
	}

	// Instantiate a non-nil raw config if none is provided
	if conf.RawConfig == nil {
		conf.RawConfig = new(server.Config)
	}

	// secureRandomReader cannot be nil
	if conf.SecureRandomReader == nil {
		conf.SecureRandomReader = rand.Reader
	}

	clusterHeartbeatInterval := conf.ClusterHeartbeatInterval
	if clusterHeartbeatInterval == 0 {
		clusterHeartbeatInterval = 5 * time.Second
	}
	if conf.NumExpirationWorkers == 0 {
		conf.NumExpirationWorkers = numExpirationWorkersDefault
	}

	if conf.NumRollbackWorkers == 0 {
		conf.NumRollbackWorkers = RollbackDefaultNumWorkers
	}

	effectiveSDKVersion := conf.EffectiveSDKVersion
	if effectiveSDKVersion == "" {
		effectiveSDKVersion = version.GetVersion().Version
	}

	var detectDeadlocks []string
	if conf.DetectDeadlocks != "" {
		detectDeadlocks = strings.Split(conf.DetectDeadlocks, ",")
		for k, v := range detectDeadlocks {
			detectDeadlocks[k] = strings.ToLower(strings.TrimSpace(v))
		}
	}

	// Use the deadlock-detecting state lock if requested
	var stateLock locking.RWMutex
	stateLock = &locking.SyncRWMutex{}
	for _, v := range detectDeadlocks {
		if v == "statelock" {
			stateLock = &locking.DeadlockRWMutex{}
		}
	}
	// Setup the core
	c := &Core{
		entCore:                        entCore{},
		devToken:                       conf.DevToken,
		physical:                       conf.Physical,
		serviceRegistration:            conf.GetServiceRegistration(),
		underlyingPhysical:             conf.Physical,
		storageType:                    conf.StorageType,
		redirectAddr:                   conf.RedirectAddr,
		clusterAddr:                    new(atomic.Value),
		clusterListener:                new(atomic.Value),
		customListenerHeader:           new(atomic.Value),
		seal:                           conf.Seal,
		stateLock:                      stateLock,
		router:                         NewRouter(),
		sealed:                         new(uint32),
		sealMigrationDone:              new(uint32),
		standby:                        true,
		standbyStopCh:                  new(atomic.Value),
		baseLogger:                     conf.Logger,
		logger:                         conf.Logger.Named("core"),
		logLevel:                       conf.LogLevel,
		defaultLeaseTTL:                conf.DefaultLeaseTTL,
		maxLeaseTTL:                    conf.MaxLeaseTTL,
		sentinelTraceDisabled:          conf.DisableSentinelTrace,
		cachingDisabled:                conf.DisableCache,
		clusterName:                    conf.ClusterName,
		clusterNetworkLayer:            conf.ClusterNetworkLayer,
		clusterPeerClusterAddrsCache:   cache.New(3*clusterHeartbeatInterval, time.Second),
		enableMlock:                    !conf.DisableMlock,
		rawEnabled:                     conf.EnableRaw,
		introspectionEnabled:           conf.EnableIntrospection,
		shutdownDoneCh:                 new(atomic.Value),
		replicationState:               new(uint32),
		localClusterPrivateKey:         new(atomic.Value),
		localClusterCert:               new(atomic.Value),
		localClusterParsedCert:         new(atomic.Value),
		activeNodeReplicationState:     new(uint32),
		keepHALockOnStepDown:           new(uint32),
		replicationFailure:             new(uint32),
		disablePerfStandby:             true,
		activeContextCancelFunc:        new(atomic.Value),
		allLoggers:                     conf.AllLoggers,
		builtinRegistry:                conf.BuiltinRegistry,
		neverBecomeActive:              new(uint32),
		clusterLeaderParams:            new(atomic.Value),
		metricsHelper:                  conf.MetricsHelper,
		metricSink:                     conf.MetricSink,
		secureRandomReader:             conf.SecureRandomReader,
		rawConfig:                      new(atomic.Value),
		recoveryMode:                   conf.RecoveryMode,
		postUnsealStarted:              new(uint32),
		raftInfo:                       new(atomic.Value),
		raftJoinDoneCh:                 make(chan struct{}),
		clusterHeartbeatInterval:       clusterHeartbeatInterval,
		activityLogConfig:              conf.ActivityLogConfig,
		keyRotateGracePeriod:           new(int64),
		numExpirationWorkers:           conf.NumExpirationWorkers,
		raftFollowerStates:             raft.NewFollowerStates(),
		disableAutopilot:               conf.DisableAutopilot,
		enableResponseHeaderHostname:   conf.EnableResponseHeaderHostname,
		enableResponseHeaderRaftNodeID: conf.EnableResponseHeaderRaftNodeID,
		mountMigrationTracker:          &sync.Map{},
		disableSSCTokens:               conf.DisableSSCTokens,
		effectiveSDKVersion:            effectiveSDKVersion,
		userFailedLoginInfo:            make(map[FailedLoginUser]*FailedLoginInfo),
		experiments:                    conf.Experiments,
		pendingRemovalMountsAllowed:    conf.PendingRemovalMountsAllowed,
		expirationRevokeRetryBase:      conf.ExpirationRevokeRetryBase,
		numRollbackWorkers:             conf.NumRollbackWorkers,
		impreciseLeaseRoleTracking:     conf.ImpreciseLeaseRoleTracking,
		detectDeadlocks:                detectDeadlocks,
	}
	c.standbyStopCh.Store(make(chan struct{}))
	atomic.StoreUint32(c.sealed, 1)
	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)

	c.shutdownDoneCh.Store(make(chan struct{}))

	c.allLoggers = append(c.allLoggers, c.logger)

	c.router.logger = c.logger.Named("router")
	c.allLoggers = append(c.allLoggers, c.router.logger)

	c.inFlightReqData = &InFlightRequests{
		InFlightReqMap:   &sync.Map{},
		InFlightReqCount: uberAtomic.NewUint64(0),
	}

	c.SetConfig(conf.RawConfig)

	atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
	c.localClusterCert.Store(([]byte)(nil))
	c.localClusterParsedCert.Store((*x509.Certificate)(nil))
	c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))

	c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil))
	c.clusterAddr.Store(conf.ClusterAddr)
	c.activeContextCancelFunc.Store((context.CancelFunc)(nil))
	atomic.StoreInt64(c.keyRotateGracePeriod, int64(2*time.Minute))

	c.hcpLinkStatus = HCPLinkStatus{
		lock:             sync.RWMutex{},
		ConnectionStatus: "disconnected",
	}

	c.raftInfo.Store((*raftInformation)(nil))

	switch conf.ClusterCipherSuites {
	case "tls13", "tls12":
		// Do nothing, let Go use the default

	case "":
		// Add in forward compatible TLS 1.3 suites, followed by handpicked 1.2 suites
		c.clusterCipherSuites = []uint16{
			// 1.3
			tls.TLS_AES_128_GCM_SHA256,
			tls.TLS_AES_256_GCM_SHA384,
			tls.TLS_CHACHA20_POLY1305_SHA256,
			// 1.2
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		}

	default:
		suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
		if err != nil {
			return nil, fmt.Errorf("error parsing cluster cipher suites: %w", err)
		}
		c.clusterCipherSuites = suites
	}

	// Load CORS config and provide a value for the core field.
	c.corsConfig = &CORSConfig{
		core:    c,
		Enabled: new(uint32),
	}
	// Load write-forwarded path manager.
	c.writeForwardedPaths = pathmanager.New()

	// Load seal information.
	if c.seal == nil {
		wrapper := aeadwrapper.NewShamirWrapper()
		wrapper.SetConfig(context.Background(), awskms.WithLogger(c.logger.Named("shamir")))

		c.seal = NewDefaultSeal(vaultseal.NewAccess(wrapper))
	}
	c.seal.SetCore(c)

	return c, nil
}

// NewCore creates, initializes and configures a Vault node (core).
func NewCore(conf *CoreConfig) (*Core, error) {
	// NOTE: The order of configuration of the core has some importance, as we can
	// make use of an early return if we are running this new core in recovery mode.
	c, err := CreateCore(conf)
	if err != nil {
		return nil, err
	}

	err = coreInit(c, conf)
	if err != nil {
		return nil, err
	}

	switch {
	case conf.DisableMlock:
		// User configured that memory lock should be disabled on unix systems.
	default:
		err = mlock.LockMemory()
		if err != nil {
			return nil, fmt.Errorf(ErrMlockFailedTemplate, err)
		}
	}

	// Construct a new AES-GCM barrier
	c.barrier, err = NewAESGCMBarrier(c.physical)
	if err != nil {
		return nil, fmt.Errorf("barrier setup failed: %w", err)
	}

	err = storedLicenseCheck(c, conf)
	if err != nil {
		return nil, err
	}

	// We create the funcs here, then populate the given config with it so that
	// the caller can share state
	conf.ReloadFuncsLock = &c.reloadFuncsLock
	c.reloadFuncsLock.Lock()
	c.reloadFuncs = make(map[string][]reloadutil.ReloadFunc)
	c.reloadFuncsLock.Unlock()
	conf.ReloadFuncs = &c.reloadFuncs

	c.rollbackPeriod = conf.RollbackPeriod
	if c.rollbackPeriod == 0 {
		// Default to 1 minute
		c.rollbackPeriod = 1 * time.Minute
	}

	// For recovery mode we've now configured enough to return early.
	if c.recoveryMode {
		return c, nil
	}

	if conf.PluginDirectory != "" {
		c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
		if err != nil {
			return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %w", err)
		}
	}

	if conf.PluginFileUid != 0 {
		c.pluginFileUid = conf.PluginFileUid
	}
	if conf.PluginFilePermissions != 0 {
		c.pluginFilePermissions = conf.PluginFilePermissions
	}

	// Create secondaries (this will only impact Enterprise versions of Vault)
	c.createSecondaries(conf.Logger)

	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		c.ha = conf.HAPhysical
	}

	// MFA method
	c.loginMFABackend = NewLoginMFABackend(c, conf.Logger)
	if c.loginMFABackend.mfaLogger != nil {
		c.AddLogger(c.loginMFABackend.mfaLogger)
	}

	// Logical backends
	c.configureLogicalBackends(conf.LogicalBackends, conf.Logger, conf.AdministrativeNamespacePath)

	// Credentials backends
	c.configureCredentialsBackends(conf.CredentialBackends, conf.Logger)

	// Audit backends
	c.configureAuditBackends(conf.AuditBackends)

	// UI
	uiStoragePrefix := systemBarrierPrefix + "ui"
	c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix))

	// Listeners
	err = c.configureListeners(conf)
	if err != nil {
		return nil, err
	}

	// Log requests level
	c.configureLogRequestsLevel(conf.RawConfig.LogRequestsLevel)

	// Quotas
	quotasLogger := conf.Logger.Named("quotas")
	c.allLoggers = append(c.allLoggers, quotasLogger)

	detectDeadlocks := false
	for _, v := range c.detectDeadlocks {
		if v == "quotas" {
			detectDeadlocks = true
		}
	}

	c.quotaManager, err = quotas.NewManager(quotasLogger, c.quotaLeaseWalker, c.metricSink, detectDeadlocks)
	if err != nil {
		return nil, err
	}

	err = c.adjustForSealMigration(conf.UnwrapSeal)
	if err != nil {
		return nil, err
	}

	// Version history
	if c.versionHistory == nil {
		c.logger.Info("Initializing version history cache for core")
		c.versionHistory = make(map[string]VaultVersion)
	}

	// Events
	eventsLogger := conf.Logger.Named("events")
	c.allLoggers = append(c.allLoggers, eventsLogger)
	// start the event system
	events, err := eventbus.NewEventBus(eventsLogger)
	if err != nil {
		return nil, err
	}
	c.events = events
	if c.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) {
		c.events.Start()
	}

	return c, nil
}
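
// exampleNewCore is an illustrative sketch (not called anywhere in Vault) of
// the minimal CoreConfig wiring NewCore expects: a physical backend plus
// whatever defaults CreateCore fills in. The backend parameter is assumed to
// be supplied by the caller, e.g. an inmem backend in tests.
func exampleNewCore(backend physical.Backend) (*Core, error) {
	conf := &CoreConfig{
		Physical:     backend,
		DisableMlock: true, // sketch only; production nodes normally leave mlock enabled
	}
	return NewCore(conf)
}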

// configureListeners configures the Core with the listeners from the CoreConfig.
func (c *Core) configureListeners(conf *CoreConfig) error {
	c.clusterListener.Store((*cluster.Listener)(nil))

	if conf.RawConfig.Listeners == nil {
		c.customListenerHeader.Store(([]*ListenerCustomHeaders)(nil))
		return nil
	}

	uiHeaders, err := c.UIHeaders()
	if err != nil {
		return err
	}

	c.customListenerHeader.Store(NewListenerCustomHeader(conf.RawConfig.Listeners, c.logger, uiHeaders))

	return nil
}

// configureLogRequestsLevel configures the Core with the supplied log requests level.
func (c *Core) configureLogRequestsLevel(level string) {
	c.logRequestsLevel = uberAtomic.NewInt32(0)

	lvl := log.LevelFromString(level)

	switch {
	case lvl > log.NoLevel && lvl < log.Off:
		c.logRequestsLevel.Store(int32(lvl))
	case level != "":
		c.logger.Warn("invalid log_requests_level", "level", level)
	}
}
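
// exampleRequestLoggingLevels is an illustrative sketch of the behavior above:
// any go-hclog level name between NoLevel and Off enables request logging,
// while an unrecognized non-empty value warns and leaves it disabled.
func exampleRequestLoggingLevels(c *Core) {
	c.configureLogRequestsLevel("debug") // request logging at debug level
	c.configureLogRequestsLevel("nope")  // warns; level resets to 0 (off)
	c.configureLogRequestsLevel("")      // silently disabled
}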

// configureAuditBackends configures the Core with the ability to create audit
// backends for various types.
func (c *Core) configureAuditBackends(backends map[string]audit.Factory) {
	auditBackends := make(map[string]audit.Factory, len(backends))

	for k, f := range backends {
		auditBackends[k] = f
	}

	c.auditBackends = auditBackends
}

// configureCredentialsBackends configures the Core with the ability to create
// credential backends for various types.
func (c *Core) configureCredentialsBackends(backends map[string]logical.Factory, logger log.Logger) {
	credentialBackends := make(map[string]logical.Factory, len(backends))

	for k, f := range backends {
		credentialBackends[k] = f
	}

	credentialBackends[mountTypeToken] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		tsLogger := logger.Named("token")
		c.AddLogger(tsLogger)
		return NewTokenStore(ctx, tsLogger, c, config)
	}

	c.credentialBackends = credentialBackends

	c.addExtraCredentialBackends()
}

// configureLogicalBackends configures the Core with the ability to create
// logical backends for various types.
func (c *Core) configureLogicalBackends(backends map[string]logical.Factory, logger log.Logger, adminNamespacePath string) {
	logicalBackends := make(map[string]logical.Factory, len(backends))

	for k, f := range backends {
		logicalBackends[k] = f
	}

	// KV
	_, ok := logicalBackends[mountTypeKV]
	if !ok {
		logicalBackends[mountTypeKV] = PassthroughBackendFactory
	}

	// Cubbyhole
	logicalBackends[mountTypeCubbyhole] = CubbyholeBackendFactory

	// System
	logicalBackends[mountTypeSystem] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		sysBackendLogger := logger.Named("system")
		c.AddLogger(sysBackendLogger)
		b := NewSystemBackend(c, sysBackendLogger)
		if err := b.Setup(ctx, config); err != nil {
			return nil, err
		}
		return b, nil
	}

	// Identity
	logicalBackends[mountTypeIdentity] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		identityLogger := logger.Named("identity")
		c.AddLogger(identityLogger)
		return NewIdentityStore(ctx, c, config, identityLogger)
	}

	c.logicalBackends = logicalBackends

	c.addExtraLogicalBackends(adminNamespacePath)
}
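
// exampleRegisterLogicalBackend is an illustrative sketch of how a caller
// supplies a custom logical backend factory through CoreConfig before NewCore
// runs; the "example" mount type and the factory parameter are hypothetical.
func exampleRegisterLogicalBackend(conf *CoreConfig, factory logical.Factory) {
	if conf.LogicalBackends == nil {
		conf.LogicalBackends = make(map[string]logical.Factory)
	}
	// configureLogicalBackends copies this map and then layers the built-in
	// kv/cubbyhole/system/identity factories on top of it.
	conf.LogicalBackends["example"] = factory
}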

// handleVersionTimeStamps stores the current version at the current time to
// storage, and then loads all versions and upgrade timestamps out from storage.
func (c *Core) handleVersionTimeStamps(ctx context.Context) error {
	currentTime := time.Now().UTC()

	vaultVersion := &VaultVersion{
		TimestampInstalled: currentTime,
		Version:            version.Version,
		BuildDate:          version.BuildDate,
	}

	isUpdated, err := c.storeVersionEntry(ctx, vaultVersion, false)
	if err != nil {
		return fmt.Errorf("error storing vault version: %w", err)
	}
	if isUpdated {
		c.logger.Info("Recorded vault version", "vault version", version.Version, "upgrade time", currentTime, "build date", version.BuildDate)
	}

	// Finally, repopulate the version history cache
	err = c.loadVersionHistory(ctx)
	if err != nil {
		return err
	}

	return nil
}

// HostnameHeaderEnabled determines whether to add the X-Vault-Hostname header
// to HTTP responses.
func (c *Core) HostnameHeaderEnabled() bool {
	return c.enableResponseHeaderHostname
}

// RaftNodeIDHeaderEnabled determines whether to add the X-Vault-Raft-Node-ID header
// to HTTP responses.
func (c *Core) RaftNodeIDHeaderEnabled() bool {
	return c.enableResponseHeaderRaftNodeID
}

// DisableSSCTokens determines whether to use server side consistent tokens or not.
func (c *Core) DisableSSCTokens() bool {
	return c.disableSSCTokens
}

// ShutdownCoreError logs a shutdown error and shuts down the Vault core.
func (c *Core) ShutdownCoreError(err error) {
	c.Logger().Error("shutting down core", "error", err)
	if shutdownErr := c.ShutdownWait(); shutdownErr != nil {
		c.Logger().Error("failed to shutdown core", "error", shutdownErr)
	}
}
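
// exampleWatchFatalErrors is an illustrative sketch of the intended use of
// ShutdownCoreError: a watcher goroutine that seals and shuts the node down
// when a critical subsystem reports an unrecoverable error. The errCh
// parameter is hypothetical.
func exampleWatchFatalErrors(c *Core, errCh <-chan error) {
	go func() {
		if err := <-errCh; err != nil {
			c.ShutdownCoreError(err)
		}
	}()
}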

// Shutdown is invoked when the Vault instance is about to be terminated. It
// should not be accessible as part of an API call as it will cause an availability
// problem. It is only used to gracefully quit in the case of HA so that failover
// happens as quickly as possible.
func (c *Core) Shutdown() error {
	c.logger.Debug("shutdown called")
	err := c.sealInternal()

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	doneCh := c.shutdownDoneCh.Load().(chan struct{})
	if doneCh != nil {
		close(doneCh)
		c.shutdownDoneCh.Store((chan struct{})(nil))
	}

	return err
}

// ShutdownWait seals the core and blocks until the shutdown has completed.
func (c *Core) ShutdownWait() error {
	donech := c.ShutdownDone()
	err := c.Shutdown()
	if donech != nil {
		<-donech
	}
	return err
}

// ShutdownDone returns a channel that will be closed after Shutdown completes
func (c *Core) ShutdownDone() <-chan struct{} {
	return c.shutdownDoneCh.Load().(chan struct{})
}
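
// exampleServeUntilShutdown is an illustrative sketch of using ShutdownDone:
// block until some other goroutine (an API handler, a signal watcher) calls
// Shutdown, which closes the returned channel.
func exampleServeUntilShutdown(c *Core) {
	done := c.ShutdownDone()
	if done != nil {
		<-done // unblocks once Shutdown has sealed the core
	}
	c.Logger().Info("core shut down, exiting")
}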

// CORSConfig returns the current CORS configuration
func (c *Core) CORSConfig() *CORSConfig {
	return c.corsConfig
}

func (c *Core) GetContext() (context.Context, context.CancelFunc) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	return context.WithCancel(namespace.RootContext(c.activeContext))
}

// Sealed checks if the Vault is currently sealed
func (c *Core) Sealed() bool {
	return atomic.LoadUint32(c.sealed) == 1
}

// SecretProgress returns the number of keys provided so far. Lock
// should only be false if the caller is already holding the read
// statelock (such as calls originating from switchedLockHandleRequest).
func (c *Core) SecretProgress(lock bool) (int, string) {
	if lock {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}
	switch c.unlockInfo {
	case nil:
		return 0, ""
	default:
		return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
	}
}
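
// exampleUnsealProgress is an illustrative sketch of surfacing unseal progress
// to an operator via SecretProgress; passing lock=true is correct for callers
// that do not already hold the state lock.
func exampleUnsealProgress(c *Core) string {
	provided, nonce := c.SecretProgress(true)
	return fmt.Sprintf("unseal progress: %d key share(s) provided, nonce %q", provided, nonce)
}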

// ResetUnsealProcess removes the current unlock parts from memory, to reset
// the unsealing process
func (c *Core) ResetUnsealProcess() {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.unlockInfo = nil
}

// UnsealMigrate is used to provide one of the key parts to unseal the Vault
// while performing a seal migration.
func (c *Core) UnsealMigrate(key []byte) (bool, error) {
	err := c.unsealFragment(key, true)
	return !c.Sealed(), err
}

// Unseal is used to provide one of the key parts to unseal the Vault.
func (c *Core) Unseal(key []byte) (bool, error) {
	err := c.unsealFragment(key, false)
	return !c.Sealed(), err
}
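
// exampleUnsealLoop is an illustrative sketch of feeding operator-provided key
// shares to Unseal until the configured threshold is met; keyShares is
// hypothetical input. The boolean result mirrors Unseal's: true once unsealed.
func exampleUnsealLoop(c *Core, keyShares [][]byte) (bool, error) {
	for _, share := range keyShares {
		unsealed, err := c.Unseal(share)
		if err != nil {
			return false, err
		}
		if unsealed {
			return true, nil
		}
	}
	return false, nil
}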

// unsealFragment takes a key fragment and attempts to use it to unseal Vault.
// Vault may remain sealed afterwards even when no error is returned,
// depending on whether enough key fragments were provided to meet the
// target threshold.
//
// The provided key should be a recovery key fragment if the seal
// is an autoseal, or a regular seal key fragment for shamir. In
// migration scenarios "seal" in the preceding sentence refers to
// the migration seal in c.migrationInfo.seal.
//
// We use getUnsealKey to work out if we have enough fragments,
// and if we don't have enough we return early. Otherwise we get
// back the combined key.
//
// For legacy shamir the combined key *is* the master key. For
// shamir the combined key is used to decrypt the master key
// read from storage. For autoseal the combined key isn't used
// except to verify that the stored recovery key matches.
//
// In migration scenarios a side-effect of unsealing is that
// the members of c.migrationInfo are populated (excluding
// .seal, which must already be populated before unseal is called.)
func (c *Core) unsealFragment(key []byte, migrate bool) error {
	defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	ctx := context.Background()

	if migrate && c.migrationInfo == nil {
		return fmt.Errorf("can't perform a seal migration, no migration seal found")
	}

	if migrate && c.isRaftUnseal() {
		return fmt.Errorf("can't perform a seal migration while joining a raft cluster")
	}

	if !migrate && c.migrationInfo != nil {
		done, err := c.sealMigrated(ctx)
		if err != nil {
			return fmt.Errorf("error checking to see if seal is migrated: %w", err)
		}
		if !done {
			return fmt.Errorf("migrate option not provided and seal migration is pending")
		}
	}

	c.logger.Debug("unseal key supplied", "migrate", migrate)

	// Explicitly check for init status. This also checks if the seal
	// configuration is valid (i.e. non-nil).
	init, err := c.Initialized(ctx)
	if err != nil {
		return err
	}
	if !init && !c.isRaftUnseal() {
		return ErrNotInit
	}

	// Verify the key length
	min, max := c.barrier.KeyLength()
	max += shamir.ShareOverhead
	if len(key) < min {
		return &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
	}
	if len(key) > max {
		return &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
	}

	// Check if already unsealed
	if !c.Sealed() {
		return nil
	}

	sealToUse := c.seal
	if migrate {
		c.logger.Info("unsealing using migration seal")
		sealToUse = c.migrationInfo.seal
	}

	newKey, err := c.recordUnsealPart(key)
	if !newKey || err != nil {
		return err
	}

	// getUnsealKey returns either a recovery key (in the case of an autoseal)
	// or a master key (legacy shamir) or an unseal key (new-style shamir).
	combinedKey, err := c.getUnsealKey(ctx, sealToUse)
	if err != nil || combinedKey == nil {
		return err
	}
	if migrate {
		c.migrationInfo.unsealKey = combinedKey
	}

	if c.isRaftUnseal() {
		return c.unsealWithRaft(combinedKey)
	}

	masterKey, err := c.unsealKeyToMasterKeyPreUnseal(ctx, sealToUse, combinedKey)
	if err != nil {
		return err
	}

	return c.unsealInternal(ctx, masterKey)
}

func (c *Core) unsealWithRaft(combinedKey []byte) error {
	ctx := context.Background()

	if c.seal.BarrierType() == wrapping.WrapperTypeShamir {
		// If this is a legacy shamir seal this serves no purpose but it
		// doesn't hurt.
		shamirWrapper, err := c.seal.GetShamirWrapper()
		if err == nil {
			err = shamirWrapper.SetAesGcmKeyBytes(combinedKey)
		}
		if err != nil {
			return err
		}
	}

	raftInfo := c.raftInfo.Load().(*raftInformation)

	switch raftInfo.joinInProgress {
	case true:
		// JoinRaftCluster is already trying to perform a join based on retry_join configuration.
		// Inform that routine that unseal key validation is complete so that it can continue to
		// try and join possible leader nodes, and wait for it to complete.

		atomic.StoreUint32(c.postUnsealStarted, 1)

		c.logger.Info("waiting for raft retry join process to complete")
		<-c.raftJoinDoneCh

	default:
		// This is the case for manual raft join. Send the answer to the leader node and
		// wait for data to start streaming in.
		if err := c.joinRaftSendAnswer(ctx, c.seal.GetAccess(), raftInfo); err != nil {
			return err
		}
		// Reset the state
		c.raftInfo.Store((*raftInformation)(nil))
	}

	go func() {
		var masterKey []byte
		keyringFound := false

		// Wait until we at least have the keyring before we attempt to
		// unseal the node.
		for {
			if !keyringFound {
				keys, err := c.underlyingPhysical.List(ctx, keyringPrefix)
				if err != nil {
					c.logger.Error("failed to list physical keys", "error", err)
					return
				}
				if strutil.StrListContains(keys, "keyring") {
					keyringFound = true
				}
			}
			if keyringFound && len(masterKey) == 0 {
				var err error
				masterKey, err = c.unsealKeyToMasterKeyPreUnseal(ctx, c.seal, combinedKey)
				if err != nil {
					c.logger.Error("failed to read master key", "error", err)
					return
				}
			}
			if keyringFound && len(masterKey) > 0 {
				err := c.unsealInternal(ctx, masterKey)
				if err != nil {
					c.logger.Error("failed to unseal", "error", err)
				}
				return
			}
			time.Sleep(1 * time.Second)
		}
	}()

	return nil
}

// recordUnsealPart takes in a key fragment, and returns true if it's a new fragment.
func (c *Core) recordUnsealPart(key []byte) (bool, error) {
	// Check if we already have this piece
	if c.unlockInfo != nil {
		for _, existing := range c.unlockInfo.Parts {
			if subtle.ConstantTimeCompare(existing, key) == 1 {
				return false, nil
			}
		}
	} else {
		uuid, err := uuid.GenerateUUID()
		if err != nil {
			return false, err
		}
		c.unlockInfo = &unlockInformation{
			Nonce: uuid,
		}
	}

	// Store this key
	c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
	return true, nil
}

// getUnsealKey uses key fragments recorded by recordUnsealPart and
// returns the combined key if the key share threshold is met.
// If the key fragments are part of a recovery key, also verify that
// it matches the stored recovery key on disk.
func (c *Core) getUnsealKey(ctx context.Context, seal Seal) ([]byte, error) {
	var config *SealConfig
	var err error

	raftInfo := c.raftInfo.Load().(*raftInformation)

	switch {
	case seal.RecoveryKeySupported():
		config, err = seal.RecoveryConfig(ctx)
	case c.isRaftUnseal():
		// Ignore follower's seal config and refer to leader's barrier
		// configuration.
		config = raftInfo.leaderBarrierConfig
	default:
		config, err = seal.BarrierConfig(ctx)
	}
	if err != nil {
		return nil, err
	}
	if config == nil {
		return nil, fmt.Errorf("failed to obtain seal/recovery configuration")
	}

	// Check if we don't have enough keys to unlock, proceed through the rest of
	// the call only if we have met the threshold
	if len(c.unlockInfo.Parts) < config.SecretThreshold {
		if c.logger.IsDebug() {
			c.logger.Debug("cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
		}
		return nil, nil
	}

	defer func() {
		c.unlockInfo = nil
	}()

	// Recover the split key. unsealKey is the shamir combined
	// key, or the single provided key if the threshold is 1.
	var unsealKey []byte
	if config.SecretThreshold == 1 {
		unsealKey = make([]byte, len(c.unlockInfo.Parts[0]))
		copy(unsealKey, c.unlockInfo.Parts[0])
	} else {
		unsealKey, err = shamir.Combine(c.unlockInfo.Parts)
		if err != nil {
			return nil, &ErrInvalidKey{fmt.Sprintf("failed to compute combined key: %v", err)}
		}
	}

	if seal.RecoveryKeySupported() {
		if err := seal.VerifyRecoveryKey(ctx, unsealKey); err != nil {
			return nil, &ErrInvalidKey{fmt.Sprintf("failed to verify recovery key: %v", err)}
		}
	}

	return unsealKey, nil
}
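
// exampleShamirRoundTrip is an illustrative sketch of the shamir property
// getUnsealKey relies on: any threshold-sized subset of shares recombines into
// the original secret, while fewer shares reveal nothing about it.
func exampleShamirRoundTrip(secret []byte) ([]byte, error) {
	shares, err := shamir.Split(secret, 5, 3) // 5 shares with a threshold of 3
	if err != nil {
		return nil, err
	}
	return shamir.Combine(shares[:3]) // any 3 of the 5 shares suffice
}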

// sealMigrated must be called with the stateLock held. It returns true if
// the seal configured in HCL and the seal configured in storage match.
// For the auto->auto same seal migration scenario, it will return false even
// if the preceding conditions are true but we cannot decrypt the master key
// in storage using the configured seal.
func (c *Core) sealMigrated(ctx context.Context) (bool, error) {
	if atomic.LoadUint32(c.sealMigrationDone) == 1 {
		return true, nil
	}

	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return false, err
	}

	if existBarrierSealConfig.Type != c.seal.BarrierType().String() {
		return false, nil
	}
	if c.seal.RecoveryKeySupported() && existRecoverySealConfig.Type != c.seal.RecoveryType() {
		return false, nil
	}

	if c.seal.BarrierType() != c.migrationInfo.seal.BarrierType() {
		return true, nil
	}

	// The above checks can handle the auto->shamir and shamir->auto
	// and auto1->auto2 cases. For auto1->auto1, we need to actually try
	// to read and decrypt the keys.

	keysMig, errMig := c.migrationInfo.seal.GetStoredKeys(ctx)
	keys, err := c.seal.GetStoredKeys(ctx)

	switch {
	case len(keys) > 0 && err == nil:
		return true, nil
	case len(keysMig) > 0 && errMig == nil:
		return false, nil
	case errors.Is(err, &ErrDecrypt{}) && errors.Is(errMig, &ErrDecrypt{}):
		return false, fmt.Errorf("decrypt error, neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
	default:
		return false, fmt.Errorf("neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
	}
}

// migrateSeal must be called with the stateLock held.
func (c *Core) migrateSeal(ctx context.Context) error {
	if c.migrationInfo == nil {
		return nil
	}

	ok, err := c.sealMigrated(ctx)
	if err != nil {
		return fmt.Errorf("error checking if seal is migrated or not: %w", err)
	}

	if ok {
		c.logger.Info("migration is already performed")
		return nil
	}

	c.logger.Info("seal migration initiated")

	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		c.logger.Info("migrating from one auto-unseal to another", "from",
			c.migrationInfo.seal.BarrierType(), "to", c.seal.BarrierType())

		// Set the recovery and barrier keys to be the same.
		recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx)
		if err != nil {
			return fmt.Errorf("error getting recovery key to set on new seal: %w", err)
		}

		if err := c.seal.SetRecoveryKey(ctx, recoveryKey); err != nil {
			return fmt.Errorf("error setting new recovery key information during migrate: %w", err)
		}

		barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx)
		if err != nil {
			return fmt.Errorf("error getting stored keys to set on new seal: %w", err)
		}

		if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
			return fmt.Errorf("error setting new barrier key information during migrate: %w", err)
		}

	case c.migrationInfo.seal.RecoveryKeySupported():
		c.logger.Info("migrating from one auto-unseal to shamir", "from", c.migrationInfo.seal.BarrierType())
		// Auto to Shamir, since recovery key isn't supported on new seal

		recoveryKey, err := c.migrationInfo.seal.RecoveryKey(ctx)
		if err != nil {
			return fmt.Errorf("error getting recovery key to set on new seal: %w", err)
		}

		// We have recovery keys; we're going to use them as the new shamir KeK.
		shamirWrapper, err := c.seal.GetShamirWrapper()
		if err != nil {
			return err
		}
		err = shamirWrapper.SetAesGcmKeyBytes(recoveryKey)
		if err != nil {
			return fmt.Errorf("failed to set master key in seal: %w", err)
		}

		barrierKeys, err := c.migrationInfo.seal.GetStoredKeys(ctx)
		if err != nil {
			return fmt.Errorf("error getting stored keys to set on new seal: %w", err)
		}

		if err := c.seal.SetStoredKeys(ctx, barrierKeys); err != nil {
			return fmt.Errorf("error setting new barrier key information during migrate: %w", err)
		}

	case c.seal.RecoveryKeySupported():
		c.logger.Info("migrating from shamir to auto-unseal", "to", c.seal.BarrierType())
		// Migration is happening from shamir -> auto. In this case use the shamir
		// combined key that was used to store the master key as the new recovery key.
		if err := c.seal.SetRecoveryKey(ctx, c.migrationInfo.unsealKey); err != nil {
			return fmt.Errorf("error setting new recovery key information: %w", err)
		}

		// Generate a new master key
		newMasterKey, err := c.barrier.GenerateKey(c.secureRandomReader)
		if err != nil {
			return fmt.Errorf("error generating new master key: %w", err)
		}

		// Rekey the barrier. This handles the case where the shamir seal we're
		// migrating from was a legacy seal without a stored master key.
		if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
			return fmt.Errorf("error rekeying barrier during migration: %w", err)
		}

		// Store the new master key
		if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
			return fmt.Errorf("error storing new master key: %w", err)
		}

	default:
		return errors.New("unhandled migration case (shamir to shamir)")
	}

	err = c.migrateSealConfig(ctx)
	if err != nil {
		return fmt.Errorf("error storing new seal configs: %w", err)
	}

	// Flag migration performed for seal-rewrap later
	atomic.StoreUint32(c.sealMigrationDone, 1)

	c.logger.Info("seal migration complete")
	return nil
}

// unsealInternal takes in the master key and attempts to unseal the barrier.
// N.B.: This must be called with the state write lock held.
func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) error {
	// Attempt to unlock
	if err := c.barrier.Unseal(ctx, masterKey); err != nil {
		return err
	}

	if err := preUnsealInternal(ctx, c); err != nil {
		return err
	}

	if err := c.startClusterListener(ctx); err != nil {
		return err
	}

	if err := c.startRaftBackend(ctx); err != nil {
		return err
	}

	if err := c.setupReplicationResolverHandler(); err != nil {
		c.logger.Warn("failed to start replication resolver server", "error", err)
	}

	// Do post-unseal setup if HA is not enabled
	if c.ha == nil {
		// We still need to set up cluster info even if it's not part of a
		// cluster right now. This also populates the cached cluster object.
		if err := c.setupCluster(ctx); err != nil {
			c.logger.Error("cluster setup failed", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		if err := c.migrateSeal(ctx); err != nil {
			c.logger.Error("seal migration error", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil))
		if err := c.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil {
			c.logger.Error("post-unseal setup failed", "error", err)
			c.barrier.Seal()
			c.logger.Warn("vault is sealed")
			return err
		}

		// Force a cache bust here, which will also run migration code
		if c.seal.RecoveryKeySupported() {
			c.seal.SetRecoveryConfig(ctx, nil)
		}

		c.standby = false
	} else {
		// Go to standby mode, wait until we are active to unseal
		c.standbyDoneCh = make(chan struct{})
		c.manualStepDownCh = make(chan struct{}, 1)
		c.standbyStopCh.Store(make(chan struct{}))
		go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh.Load().(chan struct{}))
	}

	// Success!
	atomic.StoreUint32(c.sealed, 0)
	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 1, nil)

	if c.logger.IsInfo() {
		c.logger.Info("vault is unsealed")
	}

	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifySealedStateChange(false); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify unsealed status", "error", err)
			}
		}
		if err := c.serviceRegistration.NotifyInitializedStateChange(true); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify initialized status", "error", err)
			}
		}
	}

	return nil
}

// SealWithRequest takes in a logical.Request, acquires the lock, and passes
// through to sealInternal
func (c *Core) SealWithRequest(httpCtx context.Context, req *logical.Request) error {
	defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())

	if c.Sealed() {
		return nil
	}

	c.stateLock.RLock()

	// This will unlock the read lock
	// We use background context since we may not be active
	ctx, cancel := context.WithCancel(namespace.RootContext(nil))
	defer cancel()

	go func() {
		select {
		case <-ctx.Done():
		case <-httpCtx.Done():
			cancel()
		}
	}()

	// This will unlock the read lock
	return c.sealInitCommon(ctx, req)
}

// Seal takes in a token and creates a logical.Request, acquires the lock, and
// passes through to sealInternal
func (c *Core) Seal(token string) error {
	defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())

	if c.Sealed() {
		return nil
	}

	c.stateLock.RLock()

	req := &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/seal",
		ClientToken: token,
	}

	// This will unlock the read lock
	// We use background context since we may not be active
	return c.sealInitCommon(namespace.RootContext(nil), req)
}
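
// exampleSealNode is an illustrative sketch of sealing through the
// authenticated path: Seal wraps the token in a sys/seal request so it is
// audited and policy-checked before sealInternal runs. The token is assumed
// to carry a policy allowing "sys/seal".
func exampleSealNode(c *Core, token string) error {
	if err := c.Seal(token); err != nil {
		return fmt.Errorf("seal request failed: %w", err)
	}
	if !c.Sealed() {
		return errors.New("expected node to report sealed")
	}
	return nil
}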

// sealInitCommon is common logic for Seal and SealWithRequest and is used to
// re-seal the Vault. This requires the Vault to be unsealed again to perform
// any further operations. Note: this function will read-unlock the state lock.
func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())

	var unlocked bool
	defer func() {
		if !unlocked {
			c.stateLock.RUnlock()
		}
	}()

	if req == nil {
		return errors.New("nil request to seal")
	}

	// Since there is no token store in standby nodes, sealing cannot be done.
	// Ideally, the request has to be forwarded to leader node for validation
	// and the operation should be performed. But for now, just returning with
	// an error and recommending a vault restart, which essentially does the
	// same thing.
	if c.standby {
		c.logger.Error("vault cannot seal when in standby mode; please restart instead")
		return errors.New("vault cannot seal when in standby mode; please restart instead")
	}

	err := c.PopulateTokenEntry(ctx, req)
	if err != nil {
		if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
			return logical.ErrPermissionDenied
		}
		return logical.ErrInvalidRequest
	}

	acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req)
	if err != nil {
		return err
	}

	// Audit-log the request before going any further
	auth := &logical.Auth{
		ClientToken: req.ClientToken,
		Accessor:    req.ClientTokenAccessor,
	}
	if te != nil {
		auth.IdentityPolicies = identityPolicies[te.NamespaceID]
		delete(identityPolicies, te.NamespaceID)
		auth.ExternalNamespacePolicies = identityPolicies
		auth.TokenPolicies = te.Policies
		auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
		auth.Metadata = te.Meta
		auth.DisplayName = te.DisplayName
		auth.EntityID = te.EntityID
		auth.TokenType = te.Type
	}

	logInput := &logical.LogInput{
		Auth:    auth,
		Request: req,
	}
	if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
		c.logger.Error("failed to audit request", "request_path", req.Path, "error", err)
		return errors.New("failed to audit request, cannot continue")
	}

	if entity != nil && entity.Disabled {
		c.logger.Warn("permission denied as the entity on the token is disabled")
		return logical.ErrPermissionDenied
	}
	if te != nil && te.EntityID != "" && entity == nil {
		c.logger.Warn("permission denied as the entity on the token is invalid")
		return logical.ErrPermissionDenied
	}

	// Attempt to use the token (decrement num_uses)
	// On error bail out; if the token has been revoked, bail out too
	if te != nil {
		te, err = c.tokenStore.UseToken(ctx, te)
		if err != nil {
			c.logger.Error("failed to use token", "error", err)
			return ErrInternalError
		}
		if te == nil {
			// Token is no longer valid
			return logical.ErrPermissionDenied
		}
	}

	// Verify that this operation is allowed
	authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
		RootPrivsRequired: true,
	})
	if !authResults.Allowed {
		retErr = multierror.Append(retErr, authResults.Error)
		if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
			retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
		}
		return retErr
	}

	if te != nil && te.NumUses == tokenRevocationPending {
		// Token needs to be revoked. We do this immediately here because
		// we won't have a token store after sealing.
		leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te)
		if err == nil {
			err = c.expiration.Revoke(c.activeContext, leaseID)
		}
		if err != nil {
			c.logger.Error("token needed revocation before seal but failed to revoke", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
		}
	}

	// Unlock; sealing will grab the lock when needed
	unlocked = true
	c.stateLock.RUnlock()

	sealErr := c.sealInternal()

	if sealErr != nil {
		retErr = multierror.Append(retErr, sealErr)
	}

	return
}

// UIEnabled returns if the UI is enabled
func (c *Core) UIEnabled() bool {
	return c.uiConfig.Enabled()
}

// UIHeaders returns configured UI headers
func (c *Core) UIHeaders() (http.Header, error) {
	return c.uiConfig.Headers(context.Background())
}

// sealInternal is an internal method used to seal the vault. It does not do
// any authorization checking.
func (c *Core) sealInternal() error {
	return c.sealInternalWithOptions(true, false, true)
}
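
// sealInternalWithOptions is the configurable variant of sealInternal. As the
// code below shows:
//   - grabStateLock acquires the state write lock (draining in-flight
//     requests first) before teardown; callers already holding it pass false.
//   - keepHALock sets keepHALockOnStepDown so runStandby holds on to the HA
//     lock while it shuts down.
//   - performCleanup additionally tears down any raft cluster state and stops
//     the cluster listener.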
func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock, performCleanup bool) error {
	// Mark sealed, and if already marked return
	if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
		return nil
	}

	c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)

	c.logger.Info("marked as sealed")

	// Clear forwarding clients
	c.requestForwardingConnectionLock.Lock()
	c.clearForwardingClients()
	c.requestForwardingConnectionLock.Unlock()

	activeCtxCancel := c.activeContextCancelFunc.Load().(context.CancelFunc)
	cancelCtxAndLock := func() {
		doneCh := make(chan struct{})
		go func() {
			select {
			case <-doneCh:
			// Attempt to drain any inflight requests
			case <-time.After(DefaultMaxRequestDuration):
				if activeCtxCancel != nil {
					activeCtxCancel()
				}
			}
		}()
		c.stateLock.Lock()
		close(doneCh)
		// Stop requests from processing
		if activeCtxCancel != nil {
			activeCtxCancel()
		}
	}
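
	// The closure above performs a bounded drain: it blocks on the state
	// write lock while a watchdog goroutine waits up to
	// DefaultMaxRequestDuration and then cancels the active context so that
	// in-flight requests release the lock. Once the lock is held, doneCh is
	// closed and the context is canceled unconditionally, which is a no-op if
	// the watchdog already fired.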

	// Do pre-seal teardown if HA is not enabled
	if c.ha == nil {
		if grabStateLock {
			cancelCtxAndLock()
			defer c.stateLock.Unlock()
		}
		// Even in a non-HA context we key off of this for some things
		c.standby = true

		// Stop requests from processing
		if activeCtxCancel != nil {
			activeCtxCancel()
		}

		if err := c.preSeal(); err != nil {
			c.logger.Error("pre-seal teardown failed", "error", err)
			return fmt.Errorf("internal error")
		}
	} else {
		// If we are keeping the lock we already have the state write lock
		// held. Otherwise grab it here so that when stopCh is triggered we are
		// locked.
		if keepHALock {
			atomic.StoreUint32(c.keepHALockOnStepDown, 1)
		}
		if grabStateLock {
			cancelCtxAndLock()
			defer c.stateLock.Unlock()
		}

		// If we are trying to acquire the lock, force it to return with nil so
		// runStandby will exit.
		// If we are active, signal the standby goroutine to shut down and wait
		// for completion. We have the state lock here so nothing else should
		// be toggling standby status.
		close(c.standbyStopCh.Load().(chan struct{}))
		c.logger.Debug("finished triggering standbyStopCh for runStandby")

		// Wait for runStandby to stop
		<-c.standbyDoneCh
		atomic.StoreUint32(c.keepHALockOnStepDown, 0)
		c.logger.Debug("runStandby done")
	}

	c.teardownReplicationResolverHandler()

	// Perform additional cleanup upon sealing.
	if performCleanup {
		if raftBackend := c.getRaftBackend(); raftBackend != nil {
			if err := raftBackend.TeardownCluster(c.getClusterListener()); err != nil {
				c.logger.Error("error stopping storage cluster", "error", err)
				return err
			}
		}

		// Stop the cluster listener
		c.stopClusterListener()
	}

	c.logger.Debug("sealing barrier")
	if err := c.barrier.Seal(); err != nil {
		c.logger.Error("error sealing barrier", "error", err)
		return err
	}

	if c.serviceRegistration != nil {
		if err := c.serviceRegistration.NotifySealedStateChange(true); err != nil {
			if c.logger.IsWarn() {
				c.logger.Warn("failed to notify sealed status", "error", err)
			}
		}
	}

	if c.quotaManager != nil {
		if err := c.quotaManager.Reset(); err != nil {
			c.logger.Error("error resetting quota manager", "error", err)
		}
	}

	postSealInternal(c)

	c.logger.Info("vault is sealed")

	return nil
}

type UnsealStrategy interface {
	unseal(context.Context, log.Logger, *Core) error
}
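
// standardUnsealStrategy provides the stock UnsealStrategy consumed by
// postUnseal below; the interface indirection leaves room for alternative
// post-unseal sequences (in enterprise or test code, for example) without
// changing postUnseal itself.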
type standardUnsealStrategy struct{}

func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c *Core) error {
	// Clear forwarding clients; we're active
	c.requestForwardingConnectionLock.Lock()
	c.clearForwardingClients()
	c.requestForwardingConnectionLock.Unlock()

	// Mark the active time. We do this first so it can be correlated to the logs
	// for the active startup.
	c.activeTime = time.Now().UTC()

	if err := postUnsealPhysical(c); err != nil {
		return err
	}

	if err := enterprisePostUnseal(c, false); err != nil {
		return err
	}
	if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
		// Only perf primaries should write feature flags, but we do it by
		// excluding other states so that we don't have to change it when
		// a non-replicated cluster becomes a primary.
		if err := c.persistFeatureFlags(ctx); err != nil {
			return err
		}
	}

	if c.autoRotateCancel == nil {
		var autoRotateCtx context.Context
		autoRotateCtx, c.autoRotateCancel = context.WithCancel(c.activeContext)
		go c.autoRotateBarrierLoop(autoRotateCtx)
	}

	if !c.IsDRSecondary() {
		if err := c.ensureWrappingKey(ctx); err != nil {
			return err
		}
	}
	if err := c.setupPluginCatalog(ctx); err != nil {
		return err
	}
	if err := c.loadMounts(ctx); err != nil {
		return err
	}
	if err := enterpriseSetupFilteredPaths(c); err != nil {
		return err
	}
	if err := c.setupMounts(ctx); err != nil {
		return err
	}
	if err := enterpriseSetupAPILock(c, ctx); err != nil {
		return err
	}
	if err := c.setupPolicyStore(ctx); err != nil {
		return err
	}
	if err := c.setupManagedKeyRegistry(); err != nil {
		return err
	}
	if err := c.loadCORSConfig(ctx); err != nil {
		return err
	}
	if err := c.loadCredentials(ctx); err != nil {
		return err
	}
	if err := enterpriseSetupFilteredPaths(c); err != nil {
		return err
	}
	if err := c.setupCredentials(ctx); err != nil {
		return err
	}
	if err := c.setupQuotas(ctx, false); err != nil {
		return err
	}
	if err := c.setupHeaderHMACKey(ctx, false); err != nil {
		return err
	}
	if !c.IsDRSecondary() {
		c.updateLockedUserEntries()

		if err := c.startRollback(); err != nil {
			return err
		}
		if err := c.setupExpiration(expireLeaseStrategyFairsharing); err != nil {
			return err
		}
		if err := c.loadAudits(ctx); err != nil {
			return err
		}
		if err := c.setupAudits(ctx); err != nil {
			return err
		}
		if err := c.loadIdentityStoreArtifacts(ctx); err != nil {
			return err
		}
		if err := loadPolicyMFAConfigs(ctx, c); err != nil {
			return err
		}
		c.setupCachedMFAResponseAuth()
		if err := c.loadLoginMFAConfigs(ctx); err != nil {
			return err
		}
		if err := c.setupAuditedHeadersConfig(ctx); err != nil {
			return err
		}
		if err := c.setupCensusAgent(); err != nil {
			c.logger.Error("skipping reporting for nil agent", "error", err)
		}
		// not waiting on wg to avoid changing existing behavior
		var wg sync.WaitGroup
		if err := c.setupActivityLog(ctx, &wg); err != nil {
			return err
		}
	} else {
		c.auditBroker = NewAuditBroker(c.logger)
	}

	if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
		// Cannot do this above, as we need other resources like mounts to be setup
		if err := c.setupPluginReload(); err != nil {
			return err
		}
	}

	if c.getClusterListener() != nil && (c.ha != nil || shouldStartClusterListener(c)) {
		if err := c.setupRaftActiveNode(ctx); err != nil {
			return err
		}

		if err := c.startForwarding(ctx); err != nil {
			return err
		}
	}

	c.clusterParamsLock.Lock()
	defer c.clusterParamsLock.Unlock()
	if err := startReplication(c); err != nil {
		return err
	}

	c.metricsCh = make(chan struct{})
	go c.emitMetricsActiveNode(c.metricsCh)

	// Establish version timestamps at the end of unseal on active nodes only.
	if err := c.handleVersionTimeStamps(ctx); err != nil {
		return err
	}

	return nil
}

// postUnseal is invoked on the active node, and performance standby nodes,
// after the barrier is unsealed, but before allowing any user operations. This
// allows us to set up any state that requires the Vault to be unsealed such as
// mount tables, logical backends, credential stores, etc.
func (c *Core) postUnseal(ctx context.Context, ctxCancelFunc context.CancelFunc, unsealer UnsealStrategy) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())

	// Clear any outstanding post-unseal funcs
	c.postUnsealFuncs = nil

	// Create a new request context
	c.activeContext = ctx
	c.activeContextCancelFunc.Store(ctxCancelFunc)
	defer func() {
		if retErr != nil {
			ctxCancelFunc()
			_ = c.preSeal()
		}
	}()
	c.logger.Info("post-unseal setup starting")

	// Enable the cache
	c.physicalCache.Purge(ctx)
	if !c.cachingDisabled {
		c.physicalCache.SetEnabled(true)
	}

	// Purge these for safety in case of a rekey
	_ = c.seal.SetBarrierConfig(ctx, nil)
	if c.seal.RecoveryKeySupported() {
		_ = c.seal.SetRecoveryConfig(ctx, nil)
	}

	// Load the prior, un-updated store into the version history cache so we
	// can compare against the previous state.
	if err := c.loadVersionHistory(ctx); err != nil {
		return err
	}

	if err := unsealer.unseal(ctx, c.logger, c); err != nil {
		return err
	}

	// Automatically re-encrypt the keys used for auto unsealing when the
	// seal's encryption key changes. The regular rotation of cryptographic
	// keys is a NIST recommendation. Access to prior keys for decryption
	// is normally supported for a configurable time period. Re-encrypting
	// the keys used for auto unsealing ensures Vault and its data will
	// continue to be accessible even after prior seal keys are destroyed.
	if seal, ok := c.seal.(*autoSeal); ok {
		if err := seal.UpgradeKeys(c.activeContext); err != nil {
			c.logger.Warn("post-unseal upgrade seal keys failed", "error", err)
		}

		// Start a periodic but infrequent heartbeat to detect auto-seal backend
		// outages at runtime rather than being surprised by this at the next
		// need to unseal.
		seal.StartHealthCheck()
	}

	// This is intentionally the last block in this function. We want to allow
	// writes just before allowing client requests, to ensure everything has
	// been set up properly before any writes can have happened.
	//
	// Use a small temporary worker pool to run postUnsealFuncs in parallel
	postUnsealFuncConcurrency := runtime.NumCPU() * 2
	if v := os.Getenv("VAULT_POSTUNSEAL_FUNC_CONCURRENCY"); v != "" {
		pv, err := strconv.Atoi(v)
		if err != nil || pv < 1 {
			c.logger.Warn("invalid value for VAULT_POSTUNSEAL_FUNC_CONCURRENCY, must be a positive integer", "error", err, "value", pv)
		} else {
			postUnsealFuncConcurrency = pv
		}
	}
	if postUnsealFuncConcurrency <= 1 {
		// Out of paranoia, keep the old logic for parallelism=1
		for _, v := range c.postUnsealFuncs {
			v()
		}
	} else {
		jobs := make(chan func())
		var wg sync.WaitGroup
		for i := 0; i < postUnsealFuncConcurrency; i++ {
			go func() {
				for v := range jobs {
					v()
					wg.Done()
				}
			}()
		}
		for _, v := range c.postUnsealFuncs {
			wg.Add(1)
			jobs <- v
		}
		wg.Wait()
		close(jobs)
	}
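
	// The pool above is a bounded fan-out: an unbuffered jobs channel feeds
	// postUnsealFuncConcurrency workers, wg.Add runs on the producer side
	// before each send, and wg.Wait guarantees every func has finished before
	// the channel is closed and the workers exit.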

	if atomic.LoadUint32(c.sealMigrationDone) == 1 {
		if err := c.postSealMigration(ctx); err != nil {
			c.logger.Warn("post-unseal post seal migration failed", "error", err)
		}
	}

	if os.Getenv(EnvVaultDisableLocalAuthMountEntities) != "" {
		c.logger.Warn("disabling entities for local auth mounts through env var", "env", EnvVaultDisableLocalAuthMountEntities)
	}
	c.loginMFABackend.usedCodes = cache.New(0, 30*time.Second)
	if c.systemBackend != nil && c.systemBackend.mfaBackend != nil {
		c.systemBackend.mfaBackend.usedCodes = cache.New(0, 30*time.Second)
	}

	c.logger.Info("post-unseal setup complete")
	return nil
}

// preSeal is invoked before the barrier is sealed, allowing
// for any state teardown required.
func (c *Core) preSeal() error {
	defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
	c.logger.Info("pre-seal teardown starting")

	if seal, ok := c.seal.(*autoSeal); ok {
		seal.StopHealthCheck()
	}

	// Clear any pending funcs
	c.postUnsealFuncs = nil

	c.activeTime = time.Time{}

	// Clear any rekey progress
	c.barrierRekeyConfig = nil
	c.recoveryRekeyConfig = nil

	if c.metricsCh != nil {
		close(c.metricsCh)
		c.metricsCh = nil
	}

	var result error

	c.stopForwarding()

	c.stopRaftActiveNode()

	c.clusterParamsLock.Lock()
	if err := stopReplication(c); err != nil {
		result = multierror.Append(result, fmt.Errorf("error stopping replication: %w", err))
	}
	c.clusterParamsLock.Unlock()

	if err := c.teardownAudits(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error tearing down audits: %w", err))
	}
	if err := c.stopExpiration(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error stopping expiration: %w", err))
	}
	c.stopActivityLog()

	// Clean up the censusAgent on seal
	if err := c.teardownCensusAgent(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error tearing down reporting agent: %w", err))
	}

	if err := c.teardownCredentials(context.Background()); err != nil {
		result = multierror.Append(result, fmt.Errorf("error tearing down credentials: %w", err))
	}
	if err := c.teardownPolicyStore(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error tearing down policy store: %w", err))
	}
	if err := c.stopRollback(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error stopping rollback: %w", err))
	}
	if err := c.unloadMounts(context.Background()); err != nil {
		result = multierror.Append(result, fmt.Errorf("error unloading mounts: %w", err))
	}

	if err := enterprisePreSeal(c); err != nil {
		result = multierror.Append(result, err)
	}

	if c.autoRotateCancel != nil {
		c.autoRotateCancel()
		c.autoRotateCancel = nil
	}

	if c.updateLockedUserEntriesCancel != nil {
		c.updateLockedUserEntriesCancel()
		c.updateLockedUserEntriesCancel = nil
	}

	if c.systemBackend != nil && c.systemBackend.mfaBackend != nil {
		c.systemBackend.mfaBackend.usedCodes = nil
	}

	if err := c.teardownLoginMFA(); err != nil {
		result = multierror.Append(result, fmt.Errorf("error tearing down login MFA, error: %w", err))
	}

	preSealPhysical(c)

	c.logger.Info("pre-seal teardown complete")
	return result
}

func enterprisePostUnsealImpl(c *Core, isStandby bool) error {
	return nil
}

func enterprisePreSealImpl(c *Core) error {
	return nil
}

func enterpriseSetupFilteredPathsImpl(c *Core) error {
	return nil
}

func enterpriseSetupQuotasImpl(ctx context.Context, c *Core) error {
	return nil
}

func startReplicationImpl(c *Core) error {
	return nil
}

func stopReplicationImpl(c *Core) error {
	return nil
}

func setupAPILockImpl(_ *Core, _ context.Context) error { return nil }
2017-02-16 20:15:02 +00:00
func ( c * Core ) ReplicationState ( ) consts . ReplicationState {
2018-01-16 18:51:55 +00:00
return consts . ReplicationState ( atomic . LoadUint32 ( c . replicationState ) )
2017-02-16 20:15:02 +00:00
}
2018-01-20 00:24:04 +00:00
func ( c * Core ) ActiveNodeReplicationState ( ) consts . ReplicationState {
return consts . ReplicationState ( atomic . LoadUint32 ( c . activeNodeReplicationState ) )
}
2016-04-04 14:44:22 +00:00
func ( c * Core ) SealAccess ( ) * SealAccess {
2017-10-23 20:03:36 +00:00
return NewSealAccess ( c . seal )
2016-04-04 14:44:22 +00:00
}
2016-08-15 13:42:42 +00:00
2019-09-18 19:07:18 +00:00
// StorageType returns a string equal to the storage configuration's type.
func ( c * Core ) StorageType ( ) string {
return c . storageType
}
2016-08-19 20:45:17 +00:00
func ( c * Core ) Logger ( ) log . Logger {
2016-08-15 13:42:42 +00:00
return c . logger
}
2016-08-15 20:01:15 +00:00
func ( c * Core ) BarrierKeyLength ( ) ( min , max int ) {
min , max = c . barrier . KeyLength ( )
max += shamir . ShareOverhead
return
}
2017-02-02 19:49:20 +00:00
func ( c * Core ) AuditedHeadersConfig ( ) * AuditedHeadersConfig {
return c . auditedHeaders
}
2017-03-01 17:39:42 +00:00
2018-09-18 03:03:00 +00:00
func waitUntilWALShippedImpl ( ctx context . Context , c * Core , index uint64 ) bool {
return true
}
2019-07-22 17:11:00 +00:00
func merkleRootImpl ( c * Core ) string {
return ""
}
2018-10-16 13:38:44 +00:00
func lastWALImpl ( c * Core ) uint64 {
return 0
}
2019-07-22 17:11:00 +00:00
func lastPerformanceWALImpl ( c * Core ) uint64 {
return 0
}
2021-10-26 21:17:20 +00:00
func lastDRWALImpl ( c * Core ) uint64 {
return 0
}
2017-03-01 17:42:10 +00:00
func lastRemoteWALImpl ( c * Core ) uint64 {
2017-03-01 17:39:42 +00:00
return 0
}
2017-10-23 20:03:36 +00:00
2021-02-24 11:58:10 +00:00
func lastRemoteUpstreamWALImpl ( c * Core ) uint64 {
return 0
}
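
// PhysicalSealConfigs reads the barrier seal configuration, and any recovery
// seal configuration, directly from physical storage and validates both. It
// is used at migration-check time, before the barrier has been unsealed.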
func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) {
	pe, err := c.physical.Get(ctx, barrierSealConfigPath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to fetch barrier seal configuration at migration check time: %w", err)
	}
	if pe == nil {
		return nil, nil, nil
	}

	barrierConf := new(SealConfig)
	if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil {
		return nil, nil, fmt.Errorf("failed to decode barrier seal configuration at migration check time: %w", err)
	}
	err = barrierConf.Validate()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to validate barrier seal configuration at migration check time: %w", err)
	}
	// In older versions of vault the default seal would not store a type. This
	// is here to offer backwards compatibility for older seal configs.
	if barrierConf.Type == "" {
		barrierConf.Type = wrapping.WrapperTypeShamir.String()
	}

	var recoveryConf *SealConfig
	pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to fetch seal configuration at migration check time: %w", err)
	}
	if pe != nil {
		recoveryConf = &SealConfig{}
		if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil {
			return nil, nil, fmt.Errorf("failed to decode seal configuration at migration check time: %w", err)
		}
		err = recoveryConf.Validate()
		if err != nil {
			return nil, nil, fmt.Errorf("failed to validate seal configuration at migration check time: %w", err)
		}
		// In older versions of vault the default seal would not store a type. This
		// is here to offer backwards compatibility for older seal configs.
		if recoveryConf.Type == "" {
			recoveryConf.Type = wrapping.WrapperTypeShamir.String()
		}
	}

	return barrierConf, recoveryConf, nil
}

// adjustForSealMigration takes the unwrapSeal, which is nil if (a) we're not
// configured for seal migration or (b) we might be doing a seal migration away
// from shamir. It will only be non-nil if there is a configured seal with
// the config key disabled=true, which implies a migration away from autoseal.
//
// For case (a), the common case, we expect that the stored barrier
// config matches the seal type, in which case we simply return nil. If they
// don't match, and the stored seal config is of type Shamir but the configured
// seal is not Shamir, that is case (b) and we make an unwrapSeal of type Shamir.
// Any other unwrapSeal=nil scenario is treated as an error.
//
// Given a non-nil unwrapSeal or case (b), we set up c.migrationInfo to prepare
// for a migration upon receiving a valid migration unseal request. We cannot
// check at this time for already performed (or incomplete) migrations because
// we haven't yet been unsealed, so we have no way of checking whether a
// shamir seal works to read stored seal-encrypted data.
//
// The assumption throughout is that the very last step of seal migration is
// to write the new barrier/recovery stored seal config.
func (c *Core) adjustForSealMigration(unwrapSeal Seal) error {
	ctx := context.Background()
	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return fmt.Errorf("Error checking for existing seal: %s", err)
	}

	// If we don't have an existing config or if it's the deprecated auto seal
	// which needs an upgrade, skip out
	if existBarrierSealConfig == nil || existBarrierSealConfig.Type == WrapperTypeHsmAutoDeprecated.String() {
		return nil
	}

	if unwrapSeal == nil {
		// With unwrapSeal==nil, either we're not migrating, or we're migrating
		// from shamir.
		switch {
		case existBarrierSealConfig.Type == c.seal.BarrierType().String():
			// We have the same barrier type and the unwrap seal is nil so we're not
			// migrating from same to same, IOW we assume it's not a migration.
			return nil
		case c.seal.BarrierType() == wrapping.WrapperTypeShamir:
			// The stored barrier config is not shamir, there is no disabled seal
			// in config, and either no configured seal (which equates to Shamir)
			// or an explicitly configured Shamir seal.
			return fmt.Errorf("cannot seal migrate from %q to Shamir, no disabled seal in configuration",
				existBarrierSealConfig.Type)
		case existBarrierSealConfig.Type == wrapping.WrapperTypeShamir.String():
			// The configured seal is not Shamir, the stored seal config is Shamir.
			// This is a migration away from Shamir.
			unwrapSeal = NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper()))
		default:
			// We know at this point that there is a configured non-Shamir seal,
			// that it does not match the stored non-Shamir seal config, and that
			// there is no explicit disabled seal stanza.
			return fmt.Errorf("cannot seal migrate from %q to %q, no disabled seal in configuration",
				existBarrierSealConfig.Type, c.seal.BarrierType())
		}
	} else {
		// If we're not coming from Shamir we expect the previous seal to be
		// in the config and disabled.
		if unwrapSeal.BarrierType() == wrapping.WrapperTypeShamir {
			return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
		}
	}

	// If we've reached this point it's a migration attempt and we should have both
	// c.migrationInfo.seal (old seal) and c.seal (new seal) populated.
	unwrapSeal.SetCore(c)

	// No stored recovery seal config found, what about the legacy recovery config?
	if existBarrierSealConfig.Type != wrapping.WrapperTypeShamir.String() && existRecoverySealConfig == nil {
		entry, err := c.physical.Get(ctx, recoverySealConfigPath)
		if err != nil {
			return fmt.Errorf("failed to read %q recovery seal configuration: %w", existBarrierSealConfig.Type, err)
		}
		if entry == nil {
			return errors.New("Recovery seal configuration not found for existing seal")
		}
		return errors.New("Cannot migrate seals while using a legacy recovery seal config")
	}

	c.migrationInfo = &migrationInformation{
		seal: unwrapSeal,
	}
	if existBarrierSealConfig.Type != c.seal.BarrierType().String() {
		// It's unnecessary to call this when doing an auto->auto
		// same-seal-type migration, since they'll have the same configs before
		// and after migration.
		c.adjustSealConfigDuringMigration(existBarrierSealConfig, existRecoverySealConfig)
	}
	c.initSealsForMigration()
	c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal", "from_barrier_type", c.migrationInfo.seal.BarrierType(), "to_barrier_type", c.seal.BarrierType())

	return nil
}
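
// For illustration (not part of this file's configuration handling): a
// migration away from an autoseal is requested by marking the old seal stanza
// disabled in the server's HCL configuration while adding the new seal (or
// none, for Shamir). A hypothetical awskms-to-Shamir migration would look
// something like:
//
//	seal "awskms" {
//	  disabled   = "true"
//	  region     = "us-east-1"
//	  kms_key_id = "alias/old-unseal-key"
//	}
//
// The exact fields depend on the seal type; disabled = "true" is the
// migration signal referenced in the comments above.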

func (c *Core) migrateSealConfig(ctx context.Context) error {
	existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
	if err != nil {
		return fmt.Errorf("failed to read existing seal configuration during migration: %v", err)
	}

	var bc, rc *SealConfig

	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		// Migrating from auto->auto, copy the configs over
		bc, rc = existBarrierSealConfig, existRecoverySealConfig
	case c.migrationInfo.seal.RecoveryKeySupported():
		// Migrating from auto->shamir, clone auto's recovery config and set
		// stored keys to 1.
		bc = existRecoverySealConfig.Clone()
		bc.StoredShares = 1
	case c.seal.RecoveryKeySupported():
		// Migrating from shamir->auto, set a new barrier config and set
		// recovery config to a clone of shamir's barrier config with stored
		// keys set to 0.
		bc = &SealConfig{
			Type:            c.seal.BarrierType().String(),
			SecretShares:    1,
			SecretThreshold: 1,
			StoredShares:    1,
		}
		rc = existBarrierSealConfig.Clone()
		rc.StoredShares = 0
	}

	if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
		return fmt.Errorf("error storing barrier config after migration: %w", err)
	}

	if c.seal.RecoveryKeySupported() {
		if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
			return fmt.Errorf("error storing recovery config after migration: %w", err)
		}
	} else if err := c.physical.Delete(ctx, recoverySealConfigPlaintextPath); err != nil {
		return fmt.Errorf("failed to delete old recovery seal configuration during migration: %w", err)
	}

	return nil
}
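
// To summarize the switch above, per migration direction:
//
//	auto -> auto:   barrier and recovery configs are copied over unchanged.
//	auto -> shamir: the new barrier config is a clone of the old recovery
//	                config with StoredShares=1; the plaintext recovery config
//	                is deleted since Shamir has no recovery key.
//	shamir -> auto: the new barrier config is 1/1 with StoredShares=1, and the
//	                new recovery config is a clone of the old barrier config
//	                with StoredShares=0.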

func (c *Core) adjustSealConfigDuringMigration(existBarrierSealConfig, existRecoverySealConfig *SealConfig) {
	switch {
	case c.migrationInfo.seal.RecoveryKeySupported() && existRecoverySealConfig != nil:
		// Migrating from auto->shamir, clone auto's recovery config and set
		// stored keys to 1. Unless the recovery config doesn't exist, in which
		// case the migration is assumed to already have been performed.
		newSealConfig := existRecoverySealConfig.Clone()
		newSealConfig.StoredShares = 1
		c.seal.SetCachedBarrierConfig(newSealConfig)
	case !c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
		// Migrating from shamir->auto, set a new barrier config and set
		// recovery config to a clone of shamir's barrier config with stored
		// keys set to 0.
		newBarrierSealConfig := &SealConfig{
			Type:            c.seal.BarrierType().String(),
			SecretShares:    1,
			SecretThreshold: 1,
			StoredShares:    1,
		}
		c.seal.SetCachedBarrierConfig(newBarrierSealConfig)

		newRecoveryConfig := existBarrierSealConfig.Clone()
		newRecoveryConfig.StoredShares = 0
		c.seal.SetCachedRecoveryConfig(newRecoveryConfig)
	}
}

func (c *Core) unsealKeyToRootKeyPostUnseal(ctx context.Context, combinedKey []byte) ([]byte, error) {
	return c.unsealKeyToMasterKey(ctx, c.seal, combinedKey, true, false)
}

func (c *Core) unsealKeyToMasterKeyPreUnseal(ctx context.Context, seal Seal, combinedKey []byte) ([]byte, error) {
	return c.unsealKeyToMasterKey(ctx, seal, combinedKey, false, true)
}

// unsealKeyToMasterKey takes a key provided by the user, either a recovery key
// if using an autoseal or an unseal key with Shamir. It returns a nil error
// if the key is valid and an error otherwise. It also returns the master key
// that can be used to unseal the barrier.
// If useTestSeal is true, seal will not be modified; this is used when not
// invoked as part of an unseal process. Otherwise in the non-legacy shamir
// case the combinedKey will be set in the seal, which means subsequent attempts
// to use the seal to read the master key will succeed, assuming combinedKey is
// valid.
// If allowMissing is true, a failure to find the master key in storage results
// in a nil error and a nil master key being returned.
func (c *Core) unsealKeyToMasterKey(ctx context.Context, seal Seal, combinedKey []byte, useTestSeal bool, allowMissing bool) ([]byte, error) {
	switch seal.StoredKeysSupported() {
	case vaultseal.StoredKeysSupportedGeneric:
		if err := seal.VerifyRecoveryKey(ctx, combinedKey); err != nil {
			return nil, fmt.Errorf("recovery key verification failed: %w", err)
		}

		storedKeys, err := seal.GetStoredKeys(ctx)
		if storedKeys == nil && err == nil && allowMissing {
			return nil, nil
		}
		if err == nil && len(storedKeys) != 1 {
			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
		}
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve stored keys: %w", err)
		}
		return storedKeys[0], nil

	case vaultseal.StoredKeysSupportedShamirRoot:
		if useTestSeal {
			testseal := NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper()))
			testseal.SetCore(c)
			cfg, err := seal.BarrierConfig(ctx)
			if err != nil {
				return nil, fmt.Errorf("failed to setup test barrier config: %w", err)
			}
			testseal.SetCachedBarrierConfig(cfg)
			seal = testseal
		}

		shamirWrapper, err := seal.GetShamirWrapper()
		if err != nil {
			return nil, err
		}
		err = shamirWrapper.SetAesGcmKeyBytes(combinedKey)
		if err != nil {
			return nil, &ErrInvalidKey{fmt.Sprintf("failed to setup unseal key: %v", err)}
		}

		storedKeys, err := seal.GetStoredKeys(ctx)
		if storedKeys == nil && err == nil && allowMissing {
			return nil, nil
		}
		if err == nil && len(storedKeys) != 1 {
			err = fmt.Errorf("expected exactly one stored key, got %d", len(storedKeys))
		}
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve stored keys: %w", err)
		}
		return storedKeys[0], nil

	case vaultseal.StoredKeysNotSupported:
		return combinedKey, nil
	}

	return nil, fmt.Errorf("invalid seal")
}
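
// In short, the three cases above are:
//   - StoredKeysSupportedGeneric (autoseal): combinedKey is a recovery key;
//     verify it, then return the single key stored in storage.
//   - StoredKeysSupportedShamirRoot (modern Shamir): combinedKey is the unseal
//     key; hand it to the Shamir wrapper, then read the stored master key.
//   - StoredKeysNotSupported (legacy Shamir): combinedKey is itself the
//     master key.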

// IsInSealMigrationMode returns true if we're configured to perform a seal migration,
// meaning either that we have a disabled seal in HCL configuration or the seal
// configuration in storage is Shamir but the seal in HCL is not. In this
// mode we should not auto-unseal (even if the migration is done) and we will
// accept unseal requests with and without the `migrate` option, though the migrate
// option is required if we haven't yet performed the seal migration. Lock
// should only be false if the caller is already holding the read
// statelock (such as calls originating from switchedLockHandleRequest).
func (c *Core) IsInSealMigrationMode(lock bool) bool {
	if lock {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}
	return c.migrationInfo != nil
}

// IsSealMigrated returns true if we're in seal migration mode but migration
// has already been performed (possibly by another node, or prior to this node's
// current invocation). Lock should only be false if the caller is already
// holding the read statelock (such as calls originating from switchedLockHandleRequest).
func (c *Core) IsSealMigrated(lock bool) bool {
	if !c.IsInSealMigrationMode(lock) {
		return false
	}

	if lock {
		c.stateLock.RLock()
		defer c.stateLock.RUnlock()
	}

	done, _ := c.sealMigrated(context.Background())
	return done
}

func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
	return NewBarrierEncryptorAccess(c.barrier)
}

func (c *Core) PhysicalAccess() *physical.PhysicalAccess {
	return physical.NewPhysicalAccess(c.physical)
}

func (c *Core) RouterAccess() *RouterAccess {
	return NewRouterAccess(c)
}

// IsDRSecondary returns if the current cluster state is a DR secondary.
func (c *Core) IsDRSecondary() bool {
	return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
}

func (c *Core) IsPerfSecondary() bool {
	return c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary)
}

func (c *Core) AddLogger(logger log.Logger) {
	c.allLoggersLock.Lock()
	defer c.allLoggersLock.Unlock()
	c.allLoggers = append(c.allLoggers, logger)
}

// SetLogLevel sets logging level for all tracked loggers to the level provided
func (c *Core) SetLogLevel(level log.Level) {
	c.allLoggersLock.RLock()
	defer c.allLoggersLock.RUnlock()
	for _, logger := range c.allLoggers {
		logger.SetLevel(level)
	}
}

// SetLogLevelByName sets the logging level of the named logger to the level
// provided, if it exists. Core.allLoggers is a slice and as such it is
// entirely possible that multiple entries exist for the same name. Each
// instance will be modified.
func (c *Core) SetLogLevelByName(name string, level log.Level) bool {
	c.allLoggersLock.RLock()
	defer c.allLoggersLock.RUnlock()

	found := false
	for _, logger := range c.allLoggers {
		if logger.Name() == name {
			logger.SetLevel(level)
			found = true
		}
	}

	return found
}

// SetConfig sets core's config object to the newly provided config.
func (c *Core) SetConfig(conf *server.Config) {
	c.rawConfig.Store(conf)
	bz, err := json.Marshal(c.SanitizedConfig())
	if err != nil {
		c.logger.Error("error serializing sanitized config", "error", err)
		return
	}
	c.logger.Debug("set config", "sanitized config", string(bz))
}

func (c *Core) GetListenerCustomResponseHeaders(listenerAdd string) *ListenerCustomHeaders {
	customHeaders := c.customListenerHeader.Load()
	if customHeaders == nil {
		return nil
	}

	customHeadersList, ok := customHeaders.([]*ListenerCustomHeaders)
	if customHeadersList == nil || !ok {
		return nil
	}

	for _, l := range customHeadersList {
		if l.Address == listenerAdd {
			return l
		}
	}
	return nil
}

// ExistCustomResponseHeader checks if a custom header is configured in any
// listener's stanza
func (c *Core) ExistCustomResponseHeader(header string) bool {
	customHeaders := c.customListenerHeader.Load()
	if customHeaders == nil {
		return false
	}

	customHeadersList, ok := customHeaders.([]*ListenerCustomHeaders)
	if customHeadersList == nil || !ok {
		return false
	}

	for _, l := range customHeadersList {
		if l.ExistCustomResponseHeader(header) {
			return true
		}
	}

	return false
}

func (c *Core) ReloadCustomResponseHeaders() error {
	conf := c.rawConfig.Load()
	if conf == nil {
		return fmt.Errorf("failed to load core raw config")
	}
	lns := conf.(*server.Config).Listeners
	if lns == nil {
		return fmt.Errorf("no listener configured")
	}

	uiHeaders, err := c.UIHeaders()
	if err != nil {
		return err
	}
	c.customListenerHeader.Store(NewListenerCustomHeader(lns, c.logger, uiHeaders))

	return nil
}

// SanitizedConfig returns a sanitized version of the current config.
// See server.Config.Sanitized for specific values omitted.
func (c *Core) SanitizedConfig() map[string]interface{} {
	conf := c.rawConfig.Load()
	if conf == nil {
		return nil
	}
	return conf.(*server.Config).Sanitized()
}

// LogFormat returns the log format currently in use.
func (c *Core) LogFormat() string {
	conf := c.rawConfig.Load()
	return conf.(*server.Config).LogFormat
}

// LogLevel returns the log level provided by config, CLI flag, or env.
func (c *Core) LogLevel() string {
	return c.logLevel
}

// MetricsHelper returns the global metrics helper which allows external
// packages to access Vault's internal metrics.
func (c *Core) MetricsHelper() *metricsutil.MetricsHelper {
	return c.metricsHelper
}

// MetricSink returns the metrics wrapper with which Core has been configured.
func (c *Core) MetricSink() *metricsutil.ClusterMetricSink {
	return c.metricSink
}

// BuiltinRegistry is an interface that allows the "vault" package to use
// the registry of builtin plugins without getting an import cycle. It
// also allows for mocking the registry easily.
type BuiltinRegistry interface {
	Contains(name string, pluginType consts.PluginType) bool
	Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool)
	Keys(pluginType consts.PluginType) []string
	DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool)
	IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool
}

func (c *Core) AuditLogger() AuditLogger {
	return &basicAuditor{c: c}
}

type FeatureFlags struct {
	NamespacesCubbyholesLocal bool `json:"namespace_cubbyholes_local"`
}

func (c *Core) persistFeatureFlags(ctx context.Context) error {
	if !c.PR1103disabled {
		c.logger.Debug("persisting feature flags")
		json, err := jsonutil.EncodeJSON(&FeatureFlags{NamespacesCubbyholesLocal: !c.PR1103disabled})
		if err != nil {
			return err
		}
		return c.barrier.Put(ctx, &logical.StorageEntry{
			Key:   consts.CoreFeatureFlagPath,
			Value: json,
		})
	}
	return nil
}

func (c *Core) readFeatureFlags(ctx context.Context) (*FeatureFlags, error) {
	entry, err := c.barrier.Get(ctx, consts.CoreFeatureFlagPath)
	if err != nil {
		return nil, err
	}
	var flags FeatureFlags
	if entry != nil {
		err = jsonutil.DecodeJSON(entry.Value, &flags)
		if err != nil {
			return nil, err
		}
	}
	return &flags, nil
}

// isMountable tells us whether or not we can continue mounting a plugin-based
// mount entry after failing to instantiate a backend. We do this to preserve
// the storage and path when a plugin is missing or has otherwise been
// misconfigured. This allows users to recover from errors when starting Vault
// with misconfigured plugins. It should not be possible for existing builtins
// to be misconfigured, so that is a fatal error.
func (c *Core) isMountable(ctx context.Context, entry *MountEntry, pluginType consts.PluginType) bool {
	return !c.isMountEntryBuiltin(ctx, entry, pluginType)
}

// isMountEntryBuiltin determines whether a mount entry is associated with a
// builtin of the specified plugin type.
func (c *Core) isMountEntryBuiltin(ctx context.Context, entry *MountEntry, pluginType consts.PluginType) bool {
	// Prevent a panic early on
	if entry == nil || c.pluginCatalog == nil {
		return false
	}

	// Allow type to be determined from mount entry when not otherwise specified
	if pluginType == consts.PluginTypeUnknown {
		pluginType = c.builtinTypeFromMountEntry(ctx, entry)
	}

	// Handle aliases
	pluginName := entry.Type
	if alias, ok := mountAliases[pluginName]; ok {
		pluginName = alias
	}

	plug, err := c.pluginCatalog.Get(ctx, pluginName, pluginType, entry.Version)
	if err != nil || plug == nil {
		return false
	}

	return plug.Builtin
}

// MatchingMount returns the path of the mount that will be responsible for
// handling the given request path.
func (c *Core) MatchingMount(ctx context.Context, reqPath string) string {
	return c.router.MatchingMount(ctx, reqPath)
}

func (c *Core) setupQuotas(ctx context.Context, isPerfStandby bool) error {
	if c.quotaManager == nil {
		return nil
	}

	return c.quotaManager.Setup(ctx, c.systemBarrierView, isPerfStandby, c.IsDRSecondary())
}

// ApplyRateLimitQuota checks the request against all the applicable quota rules.
// If the given request's path is exempt, no rate limiting will be applied.
func (c *Core) ApplyRateLimitQuota(ctx context.Context, req *quotas.Request) (quotas.Response, error) {
	req.Type = quotas.TypeRateLimit

	resp := quotas.Response{
		Allowed: true,
		Headers: make(map[string]string),
	}

	if c.quotaManager != nil {
		// skip rate limit checks for paths that are exempt from rate limiting
		if c.quotaManager.RateLimitPathExempt(req.Path) {
			return resp, nil
		}

		return c.quotaManager.ApplyQuota(ctx, req)
	}

	return resp, nil
}

// RateLimitAuditLoggingEnabled returns if the quota configuration allows audit
// logging of request rejections due to rate limiting quota rule violations.
func (c *Core) RateLimitAuditLoggingEnabled() bool {
	if c.quotaManager != nil {
		return c.quotaManager.RateLimitAuditLoggingEnabled()
	}
	return false
}

// RateLimitResponseHeadersEnabled returns if the quota configuration allows for
// rate limit quota HTTP headers to be added to responses.
func (c *Core) RateLimitResponseHeadersEnabled() bool {
	if c.quotaManager != nil {
		return c.quotaManager.RateLimitResponseHeadersEnabled()
	}
	return false
}

func (c *Core) KeyRotateGracePeriod() time.Duration {
	return time.Duration(atomic.LoadInt64(c.keyRotateGracePeriod))
}

func (c *Core) SetKeyRotateGracePeriod(t time.Duration) {
	atomic.StoreInt64(c.keyRotateGracePeriod, int64(t))
}

// Periodically test whether to automatically rotate the barrier key
func (c *Core) autoRotateBarrierLoop(ctx context.Context) {
	t := time.NewTicker(autoRotateCheckInterval)
	for {
		select {
		case <-t.C:
			c.checkBarrierAutoRotate(ctx)
		case <-ctx.Done():
			t.Stop()
			return
		}
	}
}

func (c *Core) checkBarrierAutoRotate(ctx context.Context) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.isPrimary() {
		reason, err := c.barrier.CheckBarrierAutoRotate(ctx)
		if err != nil {
			lf := c.logger.Error
			if strings.HasSuffix(err.Error(), "context canceled") {
				lf = c.logger.Debug
			}
			lf("error in barrier auto rotation", "error", err)
			return
		}
		if reason != "" {
			// Time to rotate. Invoke the rotation handler in order to both
			// rotate and create the replication canary.
			c.logger.Info("automatic barrier key rotation triggered", "reason", reason)
			_, err := c.systemBackend.handleRotate(ctx, nil, nil)
			if err != nil {
				c.logger.Error("error automatically rotating barrier key", "error", err)
			} else {
				metrics.IncrCounter(barrierRotationsMetric, 1)
			}
		}
	}
}

func (c *Core) isPrimary() bool {
	return !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary)
}

type LicenseState struct {
	State      string
	ExpiryTime time.Time
	Terminated bool
}

func (c *Core) loadLoginMFAConfigs(ctx context.Context) error {
	eConfigs := make([]*mfa.MFAEnforcementConfig, 0)
	allNamespaces := c.collectNamespaces()
	for _, ns := range allNamespaces {
		err := c.loginMFABackend.loadMFAMethodConfigs(ctx, ns)
		if err != nil {
			return fmt.Errorf("error loading MFA method config, namespace ID %s, error: %w", ns.ID, err)
		}
		loadedConfigs, err := c.loginMFABackend.loadMFAEnforcementConfigs(ctx, ns)
		if err != nil {
			return fmt.Errorf("error loading MFA enforcement config, namespace ID %s, error: %w", ns.ID, err)
		}

		eConfigs = append(eConfigs, loadedConfigs...)
	}

	for _, conf := range eConfigs {
		if err := c.loginMFABackend.loginMFAMethodExistenceCheck(conf); err != nil {
			c.loginMFABackend.mfaLogger.Error("failed to find all MFA methods that exist in MFA enforcement configs", "configID", conf.ID, "namespaceID", conf.NamespaceID, "error", err.Error())
		}
	}
	return nil
}

type MFACachedAuthResponse struct {
	CachedAuth            *logical.Auth
	RequestPath           string
	RequestNSID           string
	RequestNSPath         string
	RequestConnRemoteAddr string
	TimeOfStorage         time.Time
	RequestID             string
}

func (c *Core) setupCachedMFAResponseAuth() {
	c.mfaResponseAuthQueueLock.Lock()
	c.mfaResponseAuthQueue = NewLoginMFAPriorityQueue()
	mfaQueue := c.mfaResponseAuthQueue
	c.mfaResponseAuthQueueLock.Unlock()

	ctx := c.activeContext
	go func() {
		ticker := time.Tick(5 * time.Second)
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker:
				err := mfaQueue.RemoveExpiredMfaAuthResponse(defaultMFAAuthResponseTTL, time.Now())
				if err != nil {
					c.Logger().Error("failed to remove stale MFA auth response", "error", err)
				}
			}
		}
	}()
}
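
// Note on the loop above: the cleanup goroutine exits when the active context
// is canceled, but the channel returned by time.Tick has no Stop; time.Tick
// is acceptable here only because the goroutine is expected to live for the
// duration of the active context.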

// updateLockedUserEntries runs every 15 minutes to remove stale user entries
// from storage. It also updates the userFailedLoginInfo map with the correct
// information for locked users if it is out of date.
func (c *Core) updateLockedUserEntries() {
	if c.updateLockedUserEntriesCancel != nil {
		return
	}

	var updateLockedUserEntriesCtx context.Context
	updateLockedUserEntriesCtx, c.updateLockedUserEntriesCancel = context.WithCancel(c.activeContext)

	if err := c.runLockedUserEntryUpdates(updateLockedUserEntriesCtx); err != nil {
		c.Logger().Error("failed to run locked user entry updates", "error", err)
	}

	go func() {
		ticker := time.NewTicker(15 * time.Minute)
		for {
			select {
			case <-updateLockedUserEntriesCtx.Done():
				ticker.Stop()
				return
			case <-ticker.C:
				if err := c.runLockedUserEntryUpdates(updateLockedUserEntriesCtx); err != nil {
					c.Logger().Error("failed to run locked user entry updates", "error", err)
				}
			}
		}
	}()
}

// runLockedUserEntryUpdates runs updates for locked user storage entries and the userFailedLoginInfo map.
func (c *Core) runLockedUserEntryUpdates(ctx context.Context) error {
	// Check the environment variable to see if the user lockout workflow is disabled.
	var disableUserLockout bool
	if disableUserLockoutEnv := os.Getenv(consts.VaultDisableUserLockout); disableUserLockoutEnv != "" {
		var err error
		disableUserLockout, err = strconv.ParseBool(disableUserLockoutEnv)
		if err != nil {
			c.Logger().Error("Error parsing the environment variable VAULT_DISABLE_USER_LOCKOUT", "error", err)
		}
	}
	if disableUserLockout {
		return nil
	}

	// Get the list of namespaces of locked users from the locked users path in storage.
	nsIDs, err := c.barrier.List(ctx, coreLockedUsersPath)
	if err != nil {
		return err
	}

	totalLockedUsersCount := 0
	for _, nsID := range nsIDs {
		// Get the list of mount accessors of locked users for each namespace.
		mountAccessors, err := c.barrier.List(ctx, coreLockedUsersPath+nsID)
		if err != nil {
			return err
		}

		// Update the entries for locked users for each mount accessor.
		// If a storage entry is stale, i.e. the lockout duration has passed,
		// remove it from storage and from the userFailedLoginInfo map.
		// Otherwise, check that the userFailedLoginInfo map has the correct
		// failed login information, and update the entry in the map if not.
		for _, mountAccessorPath := range mountAccessors {
			mountAccessor := strings.TrimSuffix(mountAccessorPath, "/")
			lockedAliasesCount, err := c.runLockedUserEntryUpdatesForMountAccessor(ctx, mountAccessor, coreLockedUsersPath+nsID+mountAccessorPath)
			if err != nil {
				return err
			}
			totalLockedUsersCount = totalLockedUsersCount + lockedAliasesCount
		}
	}

	// Emit locked user count metrics.
	metrics.SetGaugeWithLabels([]string{"core", "locked_users"}, float32(totalLockedUsersCount), nil)

	return nil
}
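
// Illustrative only (not part of the original file): because the check above
// goes through strconv.ParseBool, the lockout workflow can be disabled
// process-wide with any value ParseBool treats as true, e.g.:
//
//	VAULT_DISABLE_USER_LOCKOUT=true vault server -config=config.hcl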

// runLockedUserEntryUpdatesForMountAccessor updates the storage entry for each locked user (alias name).
// If an entry is stale, it removes it from storage and, if present, from the userFailedLoginInfo map.
// If an entry is not stale, it updates the userFailedLoginInfo map with the correct values for the entry, if incorrect.
func (c *Core) runLockedUserEntryUpdatesForMountAccessor(ctx context.Context, mountAccessor string, path string) (int, error) {
	// Get the mount entry for the mount accessor.
	mountEntry := c.router.MatchingMountByAccessor(mountAccessor)
	if mountEntry == nil {
		mountEntry = &MountEntry{}
	}
	// Get the user lockout configuration for the mount entry.
	userLockoutConfiguration := c.getUserLockoutConfiguration(mountEntry)

	// Get the list of aliases for the mount accessor.
	aliases, err := c.barrier.List(ctx, path)
	if err != nil {
		return 0, err
	}

	lockedAliasesCount := len(aliases)

	// Check the storage entry for each alias to update.
	for _, alias := range aliases {
		loginUserInfoKey := FailedLoginUser{
			aliasName:     alias,
			mountAccessor: mountAccessor,
		}

		existingEntry, err := c.barrier.Get(ctx, path+alias)
		if err != nil {
			return 0, err
		}
		if existingEntry == nil {
			continue
		}

		var lastLoginTime int
		err = jsonutil.DecodeJSON(existingEntry.Value, &lastLoginTime)
		if err != nil {
			return 0, err
		}

		lastFailedLoginTimeFromStorageEntry := time.Unix(int64(lastLoginTime), 0)
		lockoutDurationFromConfiguration := userLockoutConfiguration.LockoutDuration

		// Get the entry for the locked user from the userFailedLoginInfo map.
		failedLoginInfoFromMap := c.LocalGetUserFailedLoginInfo(ctx, loginUserInfoKey)

		// Check if the storage entry for the locked user is stale.
		if time.Now().After(lastFailedLoginTimeFromStorageEntry.Add(lockoutDurationFromConfiguration)) {
			// Stale entry; remove it from storage.
			// Leaving this as-is, as this happens on the active node.
			// This also handles the case where the namespace is deleted.
			if err := c.barrier.Delete(ctx, path+alias); err != nil {
				return 0, err
			}
			// Remove the entry for this user from the userFailedLoginInfo map, if present, as the user is not locked.
			if failedLoginInfoFromMap != nil {
				if err = updateUserFailedLoginInfo(ctx, c, loginUserInfoKey, nil, true); err != nil {
					return 0, err
				}
			}
			lockedAliasesCount -= 1
			continue
		}

		// This is not a stale entry, so update the map with the actual failed
		// login information.
		actualFailedLoginInfo := FailedLoginInfo{
			lastFailedLoginTime: lastLoginTime,
			count:               uint(userLockoutConfiguration.LockoutThreshold),
		}
		// Compare by value rather than by pointer: comparing against the
		// address of a local variable would always report a mismatch.
		if failedLoginInfoFromMap == nil || *failedLoginInfoFromMap != actualFailedLoginInfo {
			// The map entry is incorrect; update it with the correct information.
			if err = updateUserFailedLoginInfo(ctx, c, loginUserInfoKey, &actualFailedLoginInfo, false); err != nil {
				return 0, err
			}
		}
	}

	return lockedAliasesCount, nil
}

// PopMFAResponseAuthByID pops an item from the mfaResponseAuthQueue by ID.
// It returns the cached auth response or an error.
func (c *Core) PopMFAResponseAuthByID(reqID string) (*MFACachedAuthResponse, error) {
	c.mfaResponseAuthQueueLock.Lock()
	defer c.mfaResponseAuthQueueLock.Unlock()
	return c.mfaResponseAuthQueue.PopByKey(reqID)
}

// SaveMFAResponseAuth pushes an MFACachedAuthResponse to the mfaResponseAuthQueue.
// It returns an error in case of failure.
func (c *Core) SaveMFAResponseAuth(respAuth *MFACachedAuthResponse) error {
	c.mfaResponseAuthQueueLock.Lock()
	defer c.mfaResponseAuthQueueLock.Unlock()
	return c.mfaResponseAuthQueue.Push(respAuth)
}
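
// Usage sketch (hypothetical caller; reqID and auth are assumed to come from
// the login request handling layer and are not defined in this file):
//
//	respAuth := &MFACachedAuthResponse{
//		CachedAuth:    auth,
//		RequestID:     reqID,
//		TimeOfStorage: time.Now(),
//	}
//	if err := c.SaveMFAResponseAuth(respAuth); err != nil {
//		c.Logger().Error("failed to cache MFA auth response", "error", err)
//	}
//
//	// Later, once the MFA requirement has been satisfied:
//	cached, err := c.PopMFAResponseAuthByID(reqID)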

type InFlightRequests struct {
	InFlightReqMap   *sync.Map
	InFlightReqCount *uberAtomic.Uint64
}

type InFlightReqData struct {
	StartTime        time.Time `json:"start_time"`
	ClientRemoteAddr string    `json:"client_remote_address"`
	ReqPath          string    `json:"request_path"`
	Method           string    `json:"request_method"`
	ClientID         string    `json:"client_id"`
}

func (c *Core) StoreInFlightReqData(reqID string, data InFlightReqData) {
	c.inFlightReqData.InFlightReqMap.Store(reqID, data)
	c.inFlightReqData.InFlightReqCount.Inc()
}

// FinalizeInFlightReqData logs the completed request if the corresponding
// server config option is enabled. It also removes the request from the
// inFlightReqMap and decrements the number of in-flight requests by one.
func (c *Core) FinalizeInFlightReqData(reqID string, statusCode int) {
	if c.logRequestsLevel != nil && c.logRequestsLevel.Load() != 0 {
		c.LogCompletedRequests(reqID, statusCode)
	}

	c.inFlightReqData.InFlightReqMap.Delete(reqID)
	c.inFlightReqData.InFlightReqCount.Dec()
}

// LoadInFlightReqData creates a snapshot map of the current in-flight requests.
func (c *Core) LoadInFlightReqData() map[string]InFlightReqData {
	currentInFlightReqMap := make(map[string]InFlightReqData)
	c.inFlightReqData.InFlightReqMap.Range(func(key, value interface{}) bool {
		// There is only one writer to this map, so skip checking for errors.
		v := value.(InFlightReqData)
		currentInFlightReqMap[key.(string)] = v
		return true
	})
	return currentInFlightReqMap
}
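
// Usage sketch (hypothetical caller, e.g. a diagnostics endpoint; the loop is
// an illustration only). Because Range copies values out of the sync.Map, the
// returned map is a point-in-time snapshot:
//
//	for id, req := range c.LoadInFlightReqData() {
//		c.Logger().Info("in-flight request", "id", id,
//			"method", req.Method, "path", req.ReqPath, "start_time", req.StartTime)
//	}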

// UpdateInFlightReqData updates the data for a specific reqID with the clientID.
func (c *Core) UpdateInFlightReqData(reqID, clientID string) {
	v, ok := c.inFlightReqData.InFlightReqMap.Load(reqID)
	if !ok {
		c.Logger().Trace("failed to retrieve request with ID", "request_id", reqID)
		return
	}

	// There is only one writer to this map, so skip checking for errors.
	reqData := v.(InFlightReqData)
	reqData.ClientID = clientID
	c.inFlightReqData.InFlightReqMap.Store(reqID, reqData)
}

// LogCompletedRequests logs the completed request to the server logs.
func (c *Core) LogCompletedRequests(reqID string, statusCode int) {
	logLevel := log.Level(c.logRequestsLevel.Load())
	v, ok := c.inFlightReqData.InFlightReqMap.Load(reqID)
	if !ok {
		c.logger.Log(logLevel, fmt.Sprintf("failed to retrieve request with ID %v", reqID))
		return
	}

	// There is only one writer to this map, so skip checking for errors.
	reqData := v.(InFlightReqData)
	c.logger.Log(logLevel, "completed_request",
		"start_time", reqData.StartTime.Format(time.RFC3339),
		"duration", fmt.Sprintf("%dms", time.Since(reqData.StartTime).Milliseconds()),
		"client_id", reqData.ClientID,
		"client_address", reqData.ClientRemoteAddr, "status_code", statusCode, "request_path", reqData.ReqPath,
		"request_method", reqData.Method)
}

func (c *Core) ReloadLogRequestsLevel() {
	conf := c.rawConfig.Load()
	if conf == nil {
		return
	}

	infoLevel := conf.(*server.Config).LogRequestsLevel
	switch {
	case log.LevelFromString(infoLevel) > log.NoLevel && log.LevelFromString(infoLevel) < log.Off:
		c.logRequestsLevel.Store(int32(log.LevelFromString(infoLevel)))
	case infoLevel != "":
		c.logger.Warn("invalid log_requests_level", "level", infoLevel)
	}
}
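
// For reference, the level read above comes from the server configuration; a
// minimal sketch of the relevant stanza (assuming an HCL config file):
//
//	log_requests_level = "trace"
//
// A level name outside the range accepted above triggers the "invalid
// log_requests_level" warning, while an empty value leaves the current
// setting unchanged.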

func (c *Core) ReloadIntrospectionEndpointEnabled() {
	conf := c.rawConfig.Load()
	if conf == nil {
		return
	}

	c.introspectionEnabledLock.Lock()
	defer c.introspectionEnabledLock.Unlock()
	c.introspectionEnabled = conf.(*server.Config).EnableIntrospectionEndpoint
}

type PeerNode struct {
	Hostname       string    `json:"hostname"`
	APIAddress     string    `json:"api_address"`
	ClusterAddress string    `json:"cluster_address"`
	Version        string    `json:"version"`
	LastEcho       time.Time `json:"last_echo"`
	UpgradeVersion string    `json:"upgrade_version,omitempty"`
	RedundancyZone string    `json:"redundancy_zone,omitempty"`
}

// GetHAPeerNodesCached returns the nodes that have sent us Echo requests recently.
func (c *Core) GetHAPeerNodesCached() []PeerNode {
	var nodes []PeerNode
	for itemClusterAddr, item := range c.clusterPeerClusterAddrsCache.Items() {
		info := item.Object.(nodeHAConnectionInfo)
		nodes = append(nodes, PeerNode{
			Hostname:       info.nodeInfo.Hostname,
			APIAddress:     info.nodeInfo.ApiAddr,
			ClusterAddress: itemClusterAddr,
			LastEcho:       info.lastHeartbeat,
			Version:        info.version,
			UpgradeVersion: info.upgradeVersion,
			RedundancyZone: info.redundancyZone,
		})
	}
	return nodes
}
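
// Usage sketch (hypothetical caller; shown only to illustrate the returned
// shape):
//
//	for _, node := range c.GetHAPeerNodesCached() {
//		c.Logger().Info("ha peer", "hostname", node.Hostname,
//			"cluster_address", node.ClusterAddress, "last_echo", node.LastEcho)
//	}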

func (c *Core) CheckPluginPerms(pluginName string) (err error) {
	var enableFilePermissionsCheck bool
	if enableFilePermissionsCheckEnv := os.Getenv(consts.VaultEnableFilePermissionsCheckEnv); enableFilePermissionsCheckEnv != "" {
		var err error
		enableFilePermissionsCheck, err = strconv.ParseBool(enableFilePermissionsCheckEnv)
		if err != nil {
			return errors.New("Error parsing the environment variable VAULT_ENABLE_FILE_PERMISSIONS_CHECK")
		}
	}

	if c.pluginDirectory != "" && enableFilePermissionsCheck {
		err = osutil.OwnerPermissionsMatch(c.pluginDirectory, c.pluginFileUid, c.pluginFilePermissions)
		if err != nil {
			return err
		}

		fullPath := filepath.Join(c.pluginDirectory, pluginName)
		err = osutil.OwnerPermissionsMatch(fullPath, c.pluginFileUid, c.pluginFilePermissions)
		if err != nil {
			return err
		}
	}
	return err
}
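
// As with the user lockout check above, this check is opt-in via an
// environment variable parsed by strconv.ParseBool; a sketch of enabling it:
//
//	VAULT_ENABLE_FILE_PERMISSIONS_CHECK=true vault server -config=config.hcl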

func (c *Core) LoadNodeID() (string, error) {
	raftNodeID := c.GetRaftNodeID()
	if raftNodeID != "" {
		return raftNodeID, nil
	}

	hostname, err := os.Hostname()
	if err != nil {
		return "", err
	}

	return hostname, nil
}

// DetermineRoleFromLoginRequestFromBytes will determine the role that should be applied to a quota for a given
// login request, accepting a byte payload.
func (c *Core) DetermineRoleFromLoginRequestFromBytes(ctx context.Context, mountPoint string, payload []byte) string {
	data := make(map[string]interface{})
	err := jsonutil.DecodeJSON(payload, &data)
	if err != nil {
		// Cannot discern a role from a request we cannot parse.
		return ""
	}

	return c.DetermineRoleFromLoginRequest(ctx, mountPoint, data)
}

// DetermineRoleFromLoginRequest will determine the role that should be applied to a quota for a given
// login request.
func (c *Core) DetermineRoleFromLoginRequest(ctx context.Context, mountPoint string, data map[string]interface{}) string {
	c.authLock.RLock()
	defer c.authLock.RUnlock()

	matchingBackend := c.router.MatchingBackend(ctx, mountPoint)
	if matchingBackend == nil || matchingBackend.Type() != logical.TypeCredential {
		// Role-based quotas do not apply to this request.
		return ""
	}

	resp, err := matchingBackend.HandleRequest(ctx, &logical.Request{
		MountPoint: mountPoint,
		Path:       "login",
		Operation:  logical.ResolveRoleOperation,
		Data:       data,
		Storage:    c.router.MatchingStorageByAPIPath(ctx, mountPoint+"login"),
	})
	if err != nil || resp.Data["role"] == nil {
		return ""
	}

	return resp.Data["role"].(string)
}
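
// Usage sketch (hypothetical values; the "approle/" mount and role_id are
// assumptions for illustration, not taken from this file):
//
//	role := c.DetermineRoleFromLoginRequest(ctx, "approle/", map[string]interface{}{
//		"role_id": "my-role-id",
//	})
//	// role is "" when the mount is not an auth backend or when the backend
//	// cannot resolve a role from the payload.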

// ResolveRoleForQuotas looks for any quotas requiring a role for early
// computation in the RateLimitQuotaWrapping handler.
func (c *Core) ResolveRoleForQuotas(ctx context.Context, req *quotas.Request) (bool, error) {
	if c.quotaManager == nil {
		return false, nil
	}
	return c.quotaManager.QueryResolveRoleQuotas(req)
}

// aliasNameFromLoginRequest will determine the aliasName from the login request.
func (c *Core) aliasNameFromLoginRequest(ctx context.Context, req *logical.Request) (string, error) {
	c.authLock.RLock()
	defer c.authLock.RUnlock()

	ns, err := namespace.FromContext(ctx)
	if err != nil {
		return "", err
	}

	// The namespace path is added while checking the matching backend.
	mountPath := strings.TrimPrefix(req.MountPoint, ns.Path)

	matchingBackend := c.router.MatchingBackend(ctx, mountPath)
	if matchingBackend == nil || matchingBackend.Type() != logical.TypeCredential {
		// The pathLoginAliasLookAhead operation does not apply to this request.
		return "", nil
	}

	path := strings.ReplaceAll(req.Path, mountPath, "")

	resp, err := matchingBackend.HandleRequest(ctx, &logical.Request{
		MountPoint: req.MountPoint,
		Path:       path,
		Operation:  logical.AliasLookaheadOperation,
		Data:       req.Data,
		Storage:    c.router.MatchingStorageByAPIPath(ctx, req.Path),
	})
	// Guard against nil responses as well, to avoid a nil pointer dereference
	// when the backend returns no auth data.
	if err != nil || resp == nil || resp.Auth == nil || resp.Auth.Alias == nil {
		return "", nil
	}

	return resp.Auth.Alias.Name, nil
}

// ListMounts will provide a slice containing a deep copy of each mount entry.
func (c *Core) ListMounts() ([]*MountEntry, error) {
	if c.Sealed() {
		return nil, fmt.Errorf("vault is sealed")
	}

	c.mountsLock.RLock()
	defer c.mountsLock.RUnlock()

	var entries []*MountEntry

	for _, entry := range c.mounts.Entries {
		clone, err := entry.Clone()
		if err != nil {
			return nil, err
		}

		entries = append(entries, clone)
	}

	return entries, nil
}

// ListAuths will provide a slice containing a deep copy of each auth entry.
func (c *Core) ListAuths() ([]*MountEntry, error) {
	if c.Sealed() {
		return nil, fmt.Errorf("vault is sealed")
	}

	c.authLock.RLock()
	defer c.authLock.RUnlock()

	var entries []*MountEntry

	for _, entry := range c.auth.Entries {
		clone, err := entry.Clone()
		if err != nil {
			return nil, err
		}

		entries = append(entries, clone)
	}

	return entries, nil
}
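
// Usage sketch (hypothetical caller): both listings return deep copies, so the
// returned entries can be read or mutated without holding core's mount locks:
//
//	mounts, err := c.ListMounts()
//	if err != nil {
//		return err
//	}
//	for _, m := range mounts {
//		c.Logger().Info("mounted backend", "path", m.Path, "type", m.Type)
//	}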

type GroupPolicyApplicationMode struct {
	GroupPolicyApplicationMode string `json:"group_policy_application_mode"`
}

func (c *Core) GetGroupPolicyApplicationMode(ctx context.Context) (string, error) {
	se, err := c.barrier.Get(ctx, coreGroupPolicyApplicationPath)
	if err != nil {
		return "", err
	}
	if se == nil {
		return groupPolicyApplicationModeWithinNamespaceHierarchy, nil
	}

	var modeStruct GroupPolicyApplicationMode

	err = jsonutil.DecodeJSON(se.Value, &modeStruct)
	if err != nil {
		return "", err
	}

	mode := modeStruct.GroupPolicyApplicationMode
	if mode == "" {
		mode = groupPolicyApplicationModeWithinNamespaceHierarchy
	}

	return mode, nil
}

func (c *Core) SetGroupPolicyApplicationMode(ctx context.Context, mode string) error {
	// Named jsonBytes rather than json to avoid shadowing the encoding/json import.
	jsonBytes, err := jsonutil.EncodeJSON(&GroupPolicyApplicationMode{GroupPolicyApplicationMode: mode})
	if err != nil {
		return err
	}

	return c.barrier.Put(ctx, &logical.StorageEntry{
		Key:   coreGroupPolicyApplicationPath,
		Value: jsonBytes,
	})
}
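
// Round-trip sketch (illustrative; the constant below is the same default the
// getter falls back to when no entry is stored):
//
//	if err := c.SetGroupPolicyApplicationMode(ctx, groupPolicyApplicationModeWithinNamespaceHierarchy); err != nil {
//		return err
//	}
//	mode, err := c.GetGroupPolicyApplicationMode(ctx)
//	// mode now reflects the stored value, defaulting to
//	// groupPolicyApplicationModeWithinNamespaceHierarchy when unset or empty.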

type HCPLinkStatus struct {
	lock             sync.RWMutex
	ConnectionStatus string `json:"hcp_link_status,omitempty"`
	ResourceIDOnHCP  string `json:"resource_ID_on_hcp,omitempty"`
}

func (c *Core) SetHCPLinkStatus(status, resourceID string) {
	c.hcpLinkStatus.lock.Lock()
	defer c.hcpLinkStatus.lock.Unlock()
	c.hcpLinkStatus.ConnectionStatus = status
	c.hcpLinkStatus.ResourceIDOnHCP = resourceID
}

func (c *Core) GetHCPLinkStatus() (string, string) {
	c.hcpLinkStatus.lock.RLock()
	defer c.hcpLinkStatus.lock.RUnlock()

	status := c.hcpLinkStatus.ConnectionStatus
	resourceID := c.hcpLinkStatus.ResourceIDOnHCP

	return status, resourceID
}

// IsExperimentEnabled is true if the experiment is enabled in the core.
func (c *Core) IsExperimentEnabled(experiment string) bool {
	return strutil.StrListContains(c.experiments, experiment)
}

// ListenerAddresses provides a slice of configured listener addresses.
func (c *Core) ListenerAddresses() ([]string, error) {
	addresses := make([]string, 0)

	conf := c.rawConfig.Load()
	if conf == nil {
		return nil, fmt.Errorf("failed to load core raw config")
	}

	listeners := conf.(*server.Config).Listeners
	if listeners == nil {
		return nil, fmt.Errorf("no listener configured")
	}

	for _, listener := range listeners {
		addresses = append(addresses, listener.Address)
	}

	return addresses, nil
}

// IsRaftVoter specifies whether the node is a raft voter, which is
// always false if raft storage is not in use.
func (c *Core) IsRaftVoter() bool {
	raftInfo := c.raftInfo.Load().(*raftInformation)
	if raftInfo == nil {
		return false
	}

	return !raftInfo.nonVoter
}

func (c *Core) HAEnabled() bool {
	return c.ha != nil && c.ha.HAEnabled()
}

func (c *Core) GetRaftConfiguration(ctx context.Context) (*raft.RaftConfigurationResponse, error) {
	raftBackend := c.getRaftBackend()
	if raftBackend == nil {
		return nil, nil
	}

	return raftBackend.GetConfiguration(ctx)
}

func (c *Core) GetRaftAutopilotState(ctx context.Context) (*raft.AutopilotState, error) {
	raftBackend := c.getRaftBackend()
	if raftBackend == nil {
		return nil, nil
	}

	return raftBackend.GetAutopilotServerState(ctx)
}

// Events returns a reference to the common event bus for sending and subscribing to events.
func (c *Core) Events() *eventbus.EventBus {
	return c.events
}

// DetectStateLockDeadlocks returns true when Core's stateLock was constructed
// as a deadlock-detecting lock.
func (c *Core) DetectStateLockDeadlocks() bool {
	_, ok := c.stateLock.(*locking.DeadlockRWMutex)
	return ok
}
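
// The concrete type of stateLock is chosen when Core is constructed; as an
// assumption for illustration (based on Vault's documented detect_deadlocks
// server option, not on code in this file), enabling detection looks like:
//
//	detect_deadlocks = "statelock"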