package vault

import (
	"context"
	"crypto/ecdsa"
	"crypto/subtle"
	"crypto/x509"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	log "github.com/mgutz/logxi/v1"

	"google.golang.org/grpc"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/helper/errutil"
	"github.com/hashicorp/vault/helper/identity"
	"github.com/hashicorp/vault/helper/jsonutil"
	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/helper/mlock"
	"github.com/hashicorp/vault/helper/reload"
	"github.com/hashicorp/vault/helper/tlsutil"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/shamir"
	cache "github.com/patrickmn/go-cache"
)

const (
	// coreLockPath is the path used to acquire a coordinating lock
	// for a highly-available deploy.
	coreLockPath = "core/lock"

	// The poison pill is used as a check during certain scenarios to indicate
	// to standby nodes that they should seal
	poisonPillPath = "core/poison-pill"

	// coreLeaderPrefix is the prefix used for the UUID that contains
	// the currently elected leader.
	coreLeaderPrefix = "core/leader/"

	// knownPrimaryAddrsPrefix is used to store last-known cluster address
	// information for primaries
	knownPrimaryAddrsPrefix = "core/primary-addrs/"

	// lockRetryInterval is the interval we re-attempt to acquire the
	// HA lock if an error is encountered
	lockRetryInterval = 10 * time.Second

	// leaderCheckInterval is how often a standby checks for a new leader
	leaderCheckInterval = 2500 * time.Millisecond

	// keyRotateCheckInterval is how often a standby checks for a key
	// rotation taking place.
	keyRotateCheckInterval = 30 * time.Second

	// keyRotateGracePeriod is how long we allow an upgrade path
	// for standby instances before we delete the upgrade keys
	keyRotateGracePeriod = 2 * time.Minute

	// leaderPrefixCleanDelay is how long to wait between deletions
	// of orphaned leader keys, to prevent slamming the backend.
	leaderPrefixCleanDelay = 200 * time.Millisecond

	// coreKeyringCanaryPath is used as a canary to indicate to replicated
	// clusters that they need to perform a rekey operation synchronously; this
	// isn't keyring-canary to avoid ignoring it when ignoring core/keyring
	coreKeyringCanaryPath = "core/canary-keyring"
)

var (
	// ErrAlreadyInit is returned if the core is already
	// initialized. This prevents a re-initialization.
	ErrAlreadyInit = errors.New("Vault is already initialized")

	// ErrNotInit is returned if an unseal is attempted against a
	// barrier that has not been initialized.
	ErrNotInit = errors.New("Vault is not initialized")

	// ErrInternalError is returned when we don't want to leak
	// any information about an internal error
	ErrInternalError = errors.New("internal error")

	// ErrHANotEnabled is returned if the operation only makes sense
	// in an HA setting
	ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")

	// manualStepDownSleepPeriod is how long to sleep after a user-initiated
	// step down of the active node, to prevent instantly regrabbing the lock.
	// It's a var, not a const, so that tests can manipulate it.
	manualStepDownSleepPeriod = 10 * time.Second

	// Functions only in the Enterprise version
	enterprisePostUnseal = enterprisePostUnsealImpl
	enterprisePreSeal    = enterprisePreSealImpl
	startReplication     = startReplicationImpl
	stopReplication      = stopReplicationImpl
	LastRemoteWAL        = lastRemoteWALImpl
)

// NonFatalError is an error that can be returned during NewCore that should be
// displayed but not cause a program exit
type NonFatalError struct {
	Err error
}

func (e *NonFatalError) WrappedErrors() []error {
	return []error{e.Err}
}

func (e *NonFatalError) Error() string {
	return e.Err.Error()
}
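
// Illustrative note (not part of the original source): because NonFatalError
// implements WrappedErrors, callers of NewCore can detect it with errwrap,
// roughly like:
//
//	if err != nil && errwrap.ContainsType(err, new(NonFatalError)) {
//		// log the error but continue starting up
//	}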

// ErrInvalidKey is returned if there is a user-based error with a provided
// unseal key. This will be shown to the user, so should not contain
// information that is sensitive.
type ErrInvalidKey struct {
	Reason string
}

func (e *ErrInvalidKey) Error() string {
	return fmt.Sprintf("invalid key: %v", e.Reason)
}
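
// Illustrative addition (not in the original source): compile-time checks
// that the error types above satisfy the standard error interface.
var (
	_ error = (*NonFatalError)(nil)
	_ error = (*ErrInvalidKey)(nil)
)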

type activeAdvertisement struct {
	RedirectAddr     string            `json:"redirect_addr"`
	ClusterAddr      string            `json:"cluster_addr,omitempty"`
	ClusterCert      []byte            `json:"cluster_cert,omitempty"`
	ClusterKeyParams *clusterKeyParams `json:"cluster_key_params,omitempty"`
}
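
// Illustrative note (not in the original source): the active node stores this
// advertisement as JSON under coreLeaderPrefix plus its lock UUID, so a
// standby reading that entry sees something roughly like:
//
//	{"redirect_addr":"https://active.example.com:8200","cluster_addr":"https://active.example.com:8201"}
//
// Older versions stored only the raw redirect address string, which is why
// Leader() falls back to pre-struct handling when JSON decoding fails.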

type unlockInformation struct {
	Parts [][]byte
	Nonce string
}

// Core is used as the central manager of Vault activity. It is the primary point of
// interface for API handlers and is responsible for managing the logical and physical
// backends, router, security barrier, and audit trails.
type Core struct {
	// N.B.: This is used to populate a dev token down replication, as
	// otherwise, after replication is started, a dev would have to go through
	// the generate-root process simply to talk to the new follower cluster.
	devToken string

	// HABackend may be available depending on the physical backend
	ha physical.HABackend

	// redirectAddr is the address we advertise as leader if held
	redirectAddr string

	// clusterAddr is the address we use for clustering
	clusterAddr string

	// physical backend is the untrusted backend with durable data
	physical physical.Backend

	// Our Seal, for seal configuration information
	seal Seal

	// barrier is the security barrier wrapping the physical backend
	barrier SecurityBarrier

	// router is responsible for managing the mount points for logical backends.
	router *Router

	// logicalBackends is the mapping of backends to use for this core
	logicalBackends map[string]logical.Factory

	// credentialBackends is the mapping of backends to use for this core
	credentialBackends map[string]logical.Factory

	// auditBackends is the mapping of backends to use for this core
	auditBackends map[string]audit.Factory

	// stateLock protects mutable state
	stateLock sync.RWMutex
	sealed    bool

	standby              bool
	standbyDoneCh        chan struct{}
	standbyStopCh        chan struct{}
	manualStepDownCh     chan struct{}
	keepHALockOnStepDown uint32
	heldHALock           physical.Lock

	// unlockInfo has the keys provided to Unseal until the threshold number of parts is available, as well as the operation nonce
	unlockInfo *unlockInformation

	// generateRootProgress holds the shares until we reach enough
	// to verify the master key
	generateRootConfig   *GenerateRootConfig
	generateRootProgress [][]byte
	generateRootLock     sync.Mutex

	// These variables hold the config and shares we have until we reach
	// enough to verify the appropriate master key. Note that the same lock is
	// used; this isn't time-critical so this shouldn't be a problem.
	barrierRekeyConfig    *SealConfig
	barrierRekeyProgress  [][]byte
	recoveryRekeyConfig   *SealConfig
	recoveryRekeyProgress [][]byte
	rekeyLock             sync.RWMutex

	// mounts is loaded after unseal since it is a protected
	// configuration
	mounts *MountTable

	// mountsLock is used to ensure that the mounts table does not
	// change underneath a calling function
	mountsLock sync.RWMutex

	// auth is loaded after unseal since it is a protected
	// configuration
	auth *MountTable

	// authLock is used to ensure that the auth table does not
	// change underneath a calling function
	authLock sync.RWMutex

	// audit is loaded after unseal since it is a protected
	// configuration
	audit *MountTable

	// auditLock is used to ensure that the audit table does not
	// change underneath a calling function
	auditLock sync.RWMutex

	// auditBroker is used to ingest the audit events and fan
	// out into the configured audit backends
	auditBroker *AuditBroker

	// auditedHeaders is used to configure which http headers
	// can be output in the audit logs
	auditedHeaders *AuditedHeadersConfig

	// systemBackend is the backend which is used to manage internal operations
	systemBackend *SystemBackend

	// systemBarrierView is the barrier view for the system backend
	systemBarrierView *BarrierView

	// expiration manager is used for managing LeaseIDs,
	// renewal, expiration and revocation
	expiration *ExpirationManager

	// rollback manager is used to run rollbacks periodically
	rollback *RollbackManager

	// policy store is used to manage named ACL policies
	policyStore *PolicyStore

	// token store is used to manage authentication tokens
	tokenStore *TokenStore

	// identityStore is used to manage client entities
	identityStore *IdentityStore

	// metricsCh is used to stop the metrics streaming
	metricsCh chan struct{}

	// metricsMutex is used to prevent a race condition between
	// metrics emission and sealing leading to a nil pointer
	metricsMutex sync.Mutex

	defaultLeaseTTL time.Duration
	maxLeaseTTL     time.Duration

	logger log.Logger

	// cachingDisabled indicates whether caches are disabled
	cachingDisabled bool
	// Cache stores the actual cache; we always have this but may bypass it if
	// disabled
	physicalCache physical.ToggleablePurgemonster

	// reloadFuncs is a map containing reload functions
	reloadFuncs map[string][]reload.ReloadFunc

	// reloadFuncsLock controls access to the funcs
	reloadFuncsLock sync.RWMutex

	// wrappingJWTKey is the key used for generating JWTs containing response
	// wrapping information
	wrappingJWTKey *ecdsa.PrivateKey

	//
	// Cluster information
	//
	// Name
	clusterName string
	// Specific cipher suites to use for clustering, if any
	clusterCipherSuites []uint16
	// Used to modify cluster parameters
	clusterParamsLock sync.RWMutex
	// The private key stored in the barrier used for establishing
	// mutually-authenticated connections between Vault cluster members
	localClusterPrivateKey *atomic.Value
	// The local cluster cert
	localClusterCert *atomic.Value
	// The parsed form of the local cluster cert
	localClusterParsedCert *atomic.Value
	// The TCP addresses we should use for clustering
	clusterListenerAddrs []*net.TCPAddr
	// The handler to use for request forwarding
	clusterHandler http.Handler
	// Tracks whether cluster listeners are running, e.g. it's safe to send a
	// shutdown down the channel
	clusterListenersRunning bool
	// Shutdown channel for the cluster listeners
	clusterListenerShutdownCh chan struct{}
	// Shutdown success channel. We need this to be done serially to ensure
	// that binds are removed before they might be reinstated.
	clusterListenerShutdownSuccessCh chan struct{}
	// Write lock used to ensure that we don't have multiple connections adjust
	// this value at the same time
	requestForwardingConnectionLock sync.RWMutex
	// Most recent leader UUID. Used to avoid repeatedly JSON parsing the same
	// values.
	clusterLeaderUUID string
	// Most recent leader redirect addr
	clusterLeaderRedirectAddr string
	// Most recent leader cluster addr
	clusterLeaderClusterAddr string
	// Lock for the cluster leader values
	clusterLeaderParamsLock sync.RWMutex
	// Info on cluster members
	clusterPeerClusterAddrsCache *cache.Cache
	// Stores whether we currently have a server running
	rpcServerActive *uint32
	// The context for the client
	rpcClientConnContext context.Context
	// The function for canceling the client connection
	rpcClientConnCancelFunc context.CancelFunc
	// The grpc ClientConn for RPC calls
	rpcClientConn *grpc.ClientConn
	// The grpc forwarding client
	rpcForwardingClient *forwardingClient

	// CORS Information
	corsConfig *CORSConfig

	// The active set of upstream cluster addresses; stored via the Echo
	// mechanism, loaded by the balancer
	atomicPrimaryClusterAddrs *atomic.Value

	atomicPrimaryFailoverAddrs *atomic.Value
	// replicationState keeps the current replication state cached for quick
	// lookup; activeNodeReplicationState stores the active value on standbys
	replicationState           *uint32
	activeNodeReplicationState *uint32

	// uiEnabled indicates whether the Vault Web UI is enabled
	uiEnabled bool

	// rawEnabled indicates whether the Raw endpoint is enabled
	rawEnabled bool

	// pluginDirectory is the location where Vault will look for plugin binaries
	pluginDirectory string

	// pluginCatalog is used to manage plugin configurations
	pluginCatalog *PluginCatalog

	enableMlock bool

	// This can be used to trigger operations to stop running when Vault is
	// going to be shut down, stepped down, or sealed
	activeContext           context.Context
	activeContextCancelFunc context.CancelFunc

	// Stores the sealunwrapper for downgrade needs
	sealUnwrapper physical.Backend
}

// CoreConfig is used to parameterize a core
type CoreConfig struct {
	DevToken string `json:"dev_token" structs:"dev_token" mapstructure:"dev_token"`

	LogicalBackends map[string]logical.Factory `json:"logical_backends" structs:"logical_backends" mapstructure:"logical_backends"`

	CredentialBackends map[string]logical.Factory `json:"credential_backends" structs:"credential_backends" mapstructure:"credential_backends"`

	AuditBackends map[string]audit.Factory `json:"audit_backends" structs:"audit_backends" mapstructure:"audit_backends"`

	Physical physical.Backend `json:"physical" structs:"physical" mapstructure:"physical"`

	// May be nil, which disables HA operations
	HAPhysical physical.HABackend `json:"ha_physical" structs:"ha_physical" mapstructure:"ha_physical"`

	Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`

	Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`

	// Disables the LRU cache on the physical backend
	DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`

	// Disables mlock syscall
	DisableMlock bool `json:"disable_mlock" structs:"disable_mlock" mapstructure:"disable_mlock"`

	// Custom cache size for the LRU cache on the physical backend, or zero for default
	CacheSize int `json:"cache_size" structs:"cache_size" mapstructure:"cache_size"`

	// Set as the leader address for HA
	RedirectAddr string `json:"redirect_addr" structs:"redirect_addr" mapstructure:"redirect_addr"`

	// Set as the cluster address for HA
	ClusterAddr string `json:"cluster_addr" structs:"cluster_addr" mapstructure:"cluster_addr"`

	DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`

	MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`

	ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`

	ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"`

	EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`

	// Enable the raw endpoint
	EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"`

	PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`

	ReloadFuncs     *map[string][]reload.ReloadFunc
	ReloadFuncsLock *sync.RWMutex
}
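
// Illustrative sketch (not in the original source): a minimal core for local
// experimentation might be configured roughly like this, assuming an
// in-memory physical backend constructor such as inmem.NewInmem is available:
//
//	logger := logformat.NewVaultLogger(log.LevelTrace)
//	inm, _ := inmem.NewInmem(nil, logger)
//	core, err := NewCore(&CoreConfig{
//		Physical:     inm,
//		DisableMlock: true,
//	})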

// NewCore is used to construct a new core
func NewCore(conf *CoreConfig) (*Core, error) {
	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		if conf.RedirectAddr == "" {
			return nil, fmt.Errorf("missing API address, please set in configuration or via environment")
		}
	}

	if conf.DefaultLeaseTTL == 0 {
		conf.DefaultLeaseTTL = defaultLeaseTTL
	}
	if conf.MaxLeaseTTL == 0 {
		conf.MaxLeaseTTL = maxLeaseTTL
	}
	if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
		return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
	}

	// Validate the advertise addr if it's given to us
	if conf.RedirectAddr != "" {
		u, err := url.Parse(conf.RedirectAddr)
		if err != nil {
			return nil, fmt.Errorf("redirect address is not valid url: %s", err)
		}

		if u.Scheme == "" {
			return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
		}
	}

	// Make a default logger if not provided
	if conf.Logger == nil {
		conf.Logger = logformat.NewVaultLogger(log.LevelTrace)
	}

	// Setup the core
	c := &Core{
		devToken:                         conf.DevToken,
		physical:                         conf.Physical,
		redirectAddr:                     conf.RedirectAddr,
		clusterAddr:                      conf.ClusterAddr,
		seal:                             conf.Seal,
		router:                           NewRouter(),
		sealed:                           true,
		standby:                          true,
		logger:                           conf.Logger,
		defaultLeaseTTL:                  conf.DefaultLeaseTTL,
		maxLeaseTTL:                      conf.MaxLeaseTTL,
		cachingDisabled:                  conf.DisableCache,
		clusterName:                      conf.ClusterName,
		clusterListenerShutdownCh:        make(chan struct{}),
		clusterListenerShutdownSuccessCh: make(chan struct{}),
		clusterPeerClusterAddrsCache:     cache.New(3*HeartbeatInterval, time.Second),
		enableMlock:                      !conf.DisableMlock,
		rawEnabled:                       conf.EnableRaw,
		replicationState:                 new(uint32),
		rpcServerActive:                  new(uint32),
		atomicPrimaryClusterAddrs:        new(atomic.Value),
		atomicPrimaryFailoverAddrs:       new(atomic.Value),
		localClusterPrivateKey:           new(atomic.Value),
		localClusterCert:                 new(atomic.Value),
		localClusterParsedCert:           new(atomic.Value),
		activeNodeReplicationState:       new(uint32),
	}

	atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
	c.localClusterCert.Store(([]byte)(nil))
	c.localClusterParsedCert.Store((*x509.Certificate)(nil))
	c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))

	if conf.ClusterCipherSuites != "" {
		suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
		if err != nil {
			return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err)
		}
		c.clusterCipherSuites = suites
	}

	// Load CORS config and provide a value for the core field.
	c.corsConfig = &CORSConfig{core: c}

	phys := conf.Physical
	_, txnOK := conf.Physical.(physical.Transactional)
	if c.seal == nil {
		c.seal = NewDefaultSeal()
	}
	c.seal.SetCore(c)

	c.sealUnwrapper = NewSealUnwrapper(phys, conf.Logger)

	var ok bool

	// Wrap the physical backend in a cache layer if enabled
	if txnOK {
		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, conf.Logger)
	} else {
		c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, conf.Logger)
	}
	c.physicalCache = c.physical.(physical.ToggleablePurgemonster)

	if !conf.DisableMlock {
		// Ensure our memory usage is locked into physical RAM
		if err := mlock.LockMemory(); err != nil {
			return nil, fmt.Errorf(
				"Failed to lock memory: %v\n\n"+
					"This usually means that the mlock syscall is not available.\n"+
					"Vault uses mlock to prevent memory from being swapped to\n"+
					"disk. This requires root privileges as well as a machine\n"+
					"that supports mlock. Please enable mlock on your system or\n"+
					"disable Vault from using it. To disable Vault from using it,\n"+
					"set the `disable_mlock` configuration option in your configuration\n"+
					"file.",
				err)
		}
	}

	var err error
	if conf.PluginDirectory != "" {
		c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
		if err != nil {
			return nil, fmt.Errorf("core setup failed, could not verify plugin directory: %v", err)
		}
	}

	// Construct a new AES-GCM barrier
	c.barrier, err = NewAESGCMBarrier(c.physical)
	if err != nil {
		return nil, fmt.Errorf("barrier setup failed: %v", err)
	}

	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
		c.ha = conf.HAPhysical
	}

	// We create the funcs here, then populate the given config with it so that
	// the caller can share state
	conf.ReloadFuncsLock = &c.reloadFuncsLock
	c.reloadFuncsLock.Lock()
	c.reloadFuncs = make(map[string][]reload.ReloadFunc)
	c.reloadFuncsLock.Unlock()
	conf.ReloadFuncs = &c.reloadFuncs

	// Setup the backends
	logicalBackends := make(map[string]logical.Factory)
	for k, f := range conf.LogicalBackends {
		logicalBackends[k] = f
	}
	_, ok = logicalBackends["kv"]
	if !ok {
		logicalBackends["kv"] = PassthroughBackendFactory
	}
	logicalBackends["cubbyhole"] = CubbyholeBackendFactory
	logicalBackends["system"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		b := NewSystemBackend(c)
		if err := b.Setup(ctx, config); err != nil {
			return nil, err
		}
		return b, nil
	}

	logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		return NewIdentityStore(ctx, c, config)
	}

	c.logicalBackends = logicalBackends

	credentialBackends := make(map[string]logical.Factory)
	for k, f := range conf.CredentialBackends {
		credentialBackends[k] = f
	}
	credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
		return NewTokenStore(ctx, c, config)
	}
	c.credentialBackends = credentialBackends

	auditBackends := make(map[string]audit.Factory)
	for k, f := range conf.AuditBackends {
		auditBackends[k] = f
	}
	c.auditBackends = auditBackends

	return c, nil
}

// Shutdown is invoked when the Vault instance is about to be terminated. It
// should not be accessible as part of an API call as it will cause an availability
// problem. It is only used to gracefully quit in the case of HA so that failover
// happens as quickly as possible.
func (c *Core) Shutdown() error {
	c.logger.Trace("core: shutdown called")
	c.stateLock.RLock()
	// Tell any requests that know about this to stop
	if c.activeContextCancelFunc != nil {
		c.activeContextCancelFunc()
	}
	c.stateLock.RUnlock()

	c.logger.Trace("core: shutdown initiating internal seal")
	// Seal the Vault, causes a leader stepdown
	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	c.logger.Trace("core: shutdown running internal seal")
	return c.sealInternal(false)
}

// CORSConfig returns the current CORS configuration
func (c *Core) CORSConfig() *CORSConfig {
	return c.corsConfig
}

func (c *Core) GetContext() (context.Context, context.CancelFunc) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	return context.WithCancel(c.activeContext)
}

// LookupToken returns the properties of the token from the token store. This
// is particularly useful to fetch the accessor of the client token and get it
// populated in the logical request along with the client token. The accessor
// of the client token can get audit logged.
func (c *Core) LookupToken(token string) (*TokenEntry, error) {
	if token == "" {
		return nil, fmt.Errorf("missing client token")
	}

	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return nil, consts.ErrSealed
	}
	if c.standby {
		return nil, consts.ErrStandby
	}

	// Many tests don't have a token store running
	if c.tokenStore == nil {
		return nil, nil
	}

	return c.tokenStore.Lookup(c.activeContext, token)
}

// fetchEntityAndDerivedPolicies returns the entity object for the given entity
// ID. If the entity has been merged into a different entity object, the entity
// into which the given entity ID was merged is returned. This function
// also returns the cumulative list of policies that the entity is entitled to.
// This list includes the policies from the entity itself and from all the
// groups of which the given entity is a member.
func (c *Core) fetchEntityAndDerivedPolicies(entityID string) (*identity.Entity, []string, error) {
	if entityID == "" {
		return nil, nil, nil
	}

	//c.logger.Debug("core: entity set on the token", "entity_id", te.EntityID)

	// Fetch the entity
	entity, err := c.identityStore.MemDBEntityByID(entityID, false)
	if err != nil {
		c.logger.Error("core: failed to lookup entity using its ID", "error", err)
		return nil, nil, err
	}

	if entity == nil {
		// If there was no corresponding entity object found, it is
		// possible that the entity got merged into another entity. Try
		// finding entity based on the merged entity index.
		entity, err = c.identityStore.MemDBEntityByMergedEntityID(entityID, false)
		if err != nil {
			c.logger.Error("core: failed to lookup entity in merged entity ID index", "error", err)
			return nil, nil, err
		}
	}

	var policies []string
	if entity != nil {
		//c.logger.Debug("core: entity successfully fetched; adding entity policies to token's policies to create ACL")

		// Attach the policies on the entity
		policies = append(policies, entity.Policies...)

		groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID)
		if err != nil {
			c.logger.Error("core: failed to fetch group policies", "error", err)
			return nil, nil, err
		}

		// Attach the policies from all the groups
		policies = append(policies, groupPolicies...)
	}

	return entity, policies, err
}

func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntry, *identity.Entity, error) {
	defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())

	// Ensure there is a client token
	if clientToken == "" {
		return nil, nil, nil, fmt.Errorf("missing client token")
	}

	if c.tokenStore == nil {
		c.logger.Error("core: token store is unavailable")
		return nil, nil, nil, ErrInternalError
	}

	// Resolve the token policy
	te, err := c.tokenStore.Lookup(c.activeContext, clientToken)
	if err != nil {
		c.logger.Error("core: failed to lookup token", "error", err)
		return nil, nil, nil, ErrInternalError
	}

	// Ensure the token is valid
	if te == nil {
		return nil, nil, nil, logical.ErrPermissionDenied
	}

	tokenPolicies := te.Policies

	entity, derivedPolicies, err := c.fetchEntityAndDerivedPolicies(te.EntityID)
	if err != nil {
		return nil, nil, nil, ErrInternalError
	}

	tokenPolicies = append(tokenPolicies, derivedPolicies...)

	// Construct the corresponding ACL object
	acl, err := c.policyStore.ACL(c.activeContext, tokenPolicies...)
	if err != nil {
		c.logger.Error("core: failed to construct ACL", "error", err)
		return nil, nil, nil, ErrInternalError
	}

	return acl, te, entity, nil
}

func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *TokenEntry, error) {
	defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())

	var acl *ACL
	var te *TokenEntry
	var entity *identity.Entity
	var err error

	// Even if unauth, if a token is provided, there's little reason not to
	// gather as much info as possible for the audit log and to e.g. control
	// trace mode for EGPs.
	if !unauth || (unauth && req.ClientToken != "") {
		acl, te, entity, err = c.fetchACLTokenEntryAndEntity(req.ClientToken)
		// In the unauth case we don't want to fail the command, since it's
		// unauth, we just have no information to attach to the request, so
		// ignore errors...this was best-effort anyway
		if err != nil && !unauth {
			return nil, te, err
		}
	}

	// Check if this is a root protected path
	rootPath := c.router.RootPath(req.Path)

	if rootPath && unauth {
		return nil, nil, errors.New("cannot access root path in unauthenticated request")
	}

	// When we receive a write of either type, rather than require clients to
	// PUT/POST and trust the operation, we ask the backend to give us the real
	// skinny -- if the backend implements an existence check, it can tell us
	// whether a particular resource exists. Then we can mark it as an update
	// or creation as appropriate.
	if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation {
		checkExists, resourceExists, err := c.router.RouteExistenceCheck(ctx, req)
		switch err {
		case logical.ErrUnsupportedPath:
			// fail later via bad path to avoid confusing items in the log
			checkExists = false
		case nil:
			// Continue on
		default:
			c.logger.Error("core: failed to run existence check", "error", err)
			if _, ok := err.(errutil.UserError); ok {
				return nil, nil, err
			} else {
				return nil, nil, ErrInternalError
			}
		}

		switch {
		case checkExists == false:
			// No existence check, so always treat it as an update operation, which is how it is pre 0.5
			req.Operation = logical.UpdateOperation
		case resourceExists == true:
			// It exists, so force an update operation
			req.Operation = logical.UpdateOperation
		case resourceExists == false:
			// It doesn't exist, force a create operation
			req.Operation = logical.CreateOperation
		default:
			panic("unreachable code")
		}
	}

	// Create the auth response
	auth := &logical.Auth{
		ClientToken: req.ClientToken,
		Accessor:    req.ClientTokenAccessor,
	}

	if te != nil {
		auth.Policies = te.Policies
		auth.Metadata = te.Meta
		auth.DisplayName = te.DisplayName
		auth.EntityID = te.EntityID
		// Store the entity ID in the request object
		req.EntityID = te.EntityID
	}

	// Check the standard non-root ACLs. Return the token entry if it's not
	// allowed so we can decrement the use count.
	authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
		Unauth:            unauth,
		RootPrivsRequired: rootPath,
	})
	if authResults.Error.ErrorOrNil() != nil {
		return auth, te, authResults.Error
	}
	if !authResults.Allowed {
		// Return auth for audit logging even if not allowed
		return auth, te, logical.ErrPermissionDenied
	}

	return auth, te, nil
}

// Sealed checks if the Vault is currently sealed
func (c *Core) Sealed() (bool, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return c.sealed, nil
}

// Standby checks if the Vault is in standby mode
func (c *Core) Standby() (bool, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return c.standby, nil
}

// Leader is used to get the current active leader
func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	// Check if sealed
	if c.sealed {
		return false, "", "", consts.ErrSealed
	}

	// Check if HA enabled
	if c.ha == nil {
		return false, "", "", ErrHANotEnabled
	}

	// Check if we are the leader
	if !c.standby {
		return true, c.redirectAddr, c.clusterAddr, nil
	}

	// Initialize a lock
	lock, err := c.ha.LockWith(coreLockPath, "read")
	if err != nil {
		return false, "", "", err
	}

	// Read the value
	held, leaderUUID, err := lock.Value()
	if err != nil {
		return false, "", "", err
	}
	if !held {
		return false, "", "", nil
	}

	c.clusterLeaderParamsLock.RLock()
	localLeaderUUID := c.clusterLeaderUUID
	localRedirAddr := c.clusterLeaderRedirectAddr
	localClusterAddr := c.clusterLeaderClusterAddr
	c.clusterLeaderParamsLock.RUnlock()

	// If the leader hasn't changed, return the cached value; nothing changes
	// mid-leadership, and the barrier caches anyway
	if leaderUUID == localLeaderUUID && localRedirAddr != "" {
		return false, localRedirAddr, localClusterAddr, nil
	}

	c.logger.Trace("core: found new active node information, refreshing")

	c.clusterLeaderParamsLock.Lock()
	defer c.clusterLeaderParamsLock.Unlock()

	// Validate base conditions again
	if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" {
		return false, localRedirAddr, localClusterAddr, nil
	}

	key := coreLeaderPrefix + leaderUUID
	// Use background because postUnseal isn't run on standby
	entry, err := c.barrier.Get(context.Background(), key)
	if err != nil {
		return false, "", "", err
	}
	if entry == nil {
		return false, "", "", nil
	}

	var oldAdv bool

	var adv activeAdvertisement
	err = jsonutil.DecodeJSON(entry.Value, &adv)
	if err != nil {
		// Fall back to pre-struct handling
		adv.RedirectAddr = string(entry.Value)
		c.logger.Trace("core: parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr)
		oldAdv = true
	}

	if !oldAdv {
		c.logger.Trace("core: parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr)

		// Ensure we are using current values
		err = c.loadLocalClusterTLS(adv)
		if err != nil {
			return false, "", "", err
		}

		// This will ensure that we both have a connection at the ready and that
		// the address is the current known value
		// Since this is standby, we don't use the active context. Later we may
		// use a process-scoped context
		err = c.refreshRequestForwardingConnection(context.Background(), adv.ClusterAddr)
		if err != nil {
			return false, "", "", err
		}
	}

	// Don't set these until everything has been parsed successfully or we'll
	// never try again
	c.clusterLeaderRedirectAddr = adv.RedirectAddr
	c.clusterLeaderClusterAddr = adv.ClusterAddr
	c.clusterLeaderUUID = leaderUUID

	return false, adv.RedirectAddr, adv.ClusterAddr, nil
}
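
// Illustrative note (not in the original source): callers typically branch on
// the isLeader result and use the returned addresses for redirection or
// request forwarding, e.g.
//
//	isLeader, redirectAddr, _, err := c.Leader()
//	if err == nil && !isLeader && redirectAddr != "" {
//		// redirect or forward the request to redirectAddr
//	}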

// SecretProgress returns the number of keys provided so far
func (c *Core) SecretProgress() (int, string) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	switch c.unlockInfo {
	case nil:
		return 0, ""
	default:
		return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
	}
}

// ResetUnsealProcess removes the current unlock parts from memory, to reset
// the unsealing process
func (c *Core) ResetUnsealProcess() {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	if !c.sealed {
		return
	}
	c.unlockInfo = nil
}

// Unseal is used to provide one of the key parts to unseal the Vault.
//
// The key given as a parameter will automatically be zeroed after
// this method is done with it. If you want to keep the key around, a copy
// should be made.
func (c *Core) Unseal(key []byte) (bool, error) {
	defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	ctx := context.Background()

	// Explicitly check for init status. This also checks if the seal
	// configuration is valid (i.e. non-nil).
	init, err := c.Initialized(ctx)
	if err != nil {
		return false, err
	}
	if !init {
		return false, ErrNotInit
	}

	// Verify the key length
	min, max := c.barrier.KeyLength()
	max += shamir.ShareOverhead
	if len(key) < min {
		return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
	}
	if len(key) > max {
		return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
	}

	// Get the barrier seal configuration
	config, err := c.seal.BarrierConfig(ctx)
	if err != nil {
		return false, err
	}

	// Check if already unsealed
	if !c.sealed {
		return true, nil
	}

	masterKey, err := c.unsealPart(ctx, config, key, false)
	if err != nil {
		return false, err
	}
	if masterKey != nil {
		return c.unsealInternal(ctx, masterKey)
	}

	return false, nil
}
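
// Illustrative sketch (not in the original source): unsealing is an iterative
// process in which key shares are submitted one at a time until Unseal
// reports that the threshold has been met:
//
//	for _, share := range shares {
//		unsealed, err := c.Unseal(share)
//		if err != nil {
//			return err
//		}
//		if unsealed {
//			break
//		}
//	}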

// UnsealWithRecoveryKeys is used to provide one of the recovery key shares to
// unseal the Vault.
func (c *Core) UnsealWithRecoveryKeys(ctx context.Context, key []byte) (bool, error) {
	defer metrics.MeasureSince([]string{"core", "unseal_with_recovery_keys"}, time.Now())

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	// Explicitly check for init status
	init, err := c.Initialized(ctx)
	if err != nil {
		return false, err
	}
	if !init {
		return false, ErrNotInit
	}

	var config *SealConfig
	// If recovery keys are supported then use recovery seal config to unseal
	if c.seal.RecoveryKeySupported() {
		config, err = c.seal.RecoveryConfig(ctx)
		if err != nil {
			return false, err
		}
	}

	// Check if already unsealed
	if !c.sealed {
		return true, nil
	}

	masterKey, err := c.unsealPart(ctx, config, key, true)
	if err != nil {
		return false, err
	}
	if masterKey != nil {
		return c.unsealInternal(ctx, masterKey)
	}

	return false, nil
}
|
|
|
|
|
2017-11-07 20:15:39 +00:00
|
|
|
// unsealPart takes in a key share, and returns the master key if the threshold
|
|
|
|
// is met. If recovery keys are supported, recovery key shares may be provided.
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, useRecoveryKeys bool) ([]byte, error) {
|
2015-03-11 18:43:36 +00:00
|
|
|
// Check if we already have this piece
|
2017-01-17 16:47:06 +00:00
|
|
|
if c.unlockInfo != nil {
|
|
|
|
for _, existing := range c.unlockInfo.Parts {
|
2017-02-17 01:13:19 +00:00
|
|
|
if subtle.ConstantTimeCompare(existing, key) == 1 {
|
|
|
|
return nil, nil
|
2017-01-17 16:47:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
uuid, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
2017-02-17 01:13:19 +00:00
|
|
|
return nil, err
|
2017-01-17 16:47:06 +00:00
|
|
|
}
|
|
|
|
c.unlockInfo = &unlockInformation{
|
|
|
|
Nonce: uuid,
|
2015-03-11 18:43:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store this key
|
2017-01-17 16:47:06 +00:00
|
|
|
c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
|
2015-03-11 18:43:36 +00:00
|
|
|
|
2017-11-07 20:15:39 +00:00
|
|
|
// If we don't yet have enough keys to unlock, return early; proceed through
|
|
|
|
// the rest of the call only once we have met the threshold
|
2017-01-17 16:47:06 +00:00
|
|
|
if len(c.unlockInfo.Parts) < config.SecretThreshold {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsDebug() {
|
2017-01-17 16:47:06 +00:00
|
|
|
c.logger.Debug("core: cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
|
2016-08-19 20:45:17 +00:00
|
|
|
}
|
2017-02-17 01:13:19 +00:00
|
|
|
return nil, nil
|
2015-03-11 18:43:36 +00:00
|
|
|
}
|
|
|
|
|
2017-02-17 01:13:19 +00:00
|
|
|
// Best-effort memzero of unlock parts once we're done with them
|
|
|
|
defer func() {
|
2017-11-07 20:15:39 +00:00
|
|
|
for i := range c.unlockInfo.Parts {
|
2017-02-17 01:13:19 +00:00
|
|
|
memzero(c.unlockInfo.Parts[i])
|
|
|
|
}
|
|
|
|
c.unlockInfo = nil
|
|
|
|
}()
|
|
|
|
|
2017-11-07 20:15:39 +00:00
|
|
|
// Recover the split key. recoveredKey is the shamir combined
|
|
|
|
// key, or the single provided key if the threshold is 1.
|
|
|
|
var recoveredKey []byte
|
2017-02-17 01:13:19 +00:00
|
|
|
var err error
|
2015-03-11 18:43:36 +00:00
|
|
|
if config.SecretThreshold == 1 {
|
2017-11-07 20:15:39 +00:00
|
|
|
recoveredKey = make([]byte, len(c.unlockInfo.Parts[0]))
|
|
|
|
copy(recoveredKey, c.unlockInfo.Parts[0])
|
2015-03-11 18:43:36 +00:00
|
|
|
} else {
|
2017-11-07 20:15:39 +00:00
|
|
|
recoveredKey, err = shamir.Combine(c.unlockInfo.Parts)
|
2015-03-11 18:43:36 +00:00
|
|
|
if err != nil {
|
2017-02-17 01:13:19 +00:00
|
|
|
return nil, fmt.Errorf("failed to compute master key: %v", err)
|
2015-03-11 18:43:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-19 08:44:06 +00:00
|
|
|
if c.seal.RecoveryKeySupported() && useRecoveryKeys {
|
2017-11-07 20:15:39 +00:00
|
|
|
// Verify recovery key
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
|
2017-11-07 20:15:39 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get stored keys and shamir combine into single master key. Unsealing with
|
|
|
|
// recovery keys currently does not support: 1) mixed stored and non-stored
|
|
|
|
// keys setup, nor 2) seals that support recovery keys but not stored keys.
|
|
|
|
// If insufficient shares are provided, shamir.Combine will error, and if
|
|
|
|
// no stored keys are found it will return masterKey as nil.
|
|
|
|
var masterKey []byte
|
2018-01-19 08:44:06 +00:00
|
|
|
if c.seal.StoredKeysSupported() {
|
2018-01-19 06:44:44 +00:00
|
|
|
masterKeyShares, err := c.seal.GetStoredKeys(ctx)
|
2017-11-07 20:15:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to retrieve stored keys: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(masterKeyShares) == 1 {
|
|
|
|
return masterKeyShares[0], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
masterKey, err = shamir.Combine(masterKeyShares)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to compute master key: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return masterKey, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is not a recovery key-supported seal, then the recovered key is
|
|
|
|
// the master key to be returned.
|
|
|
|
return recoveredKey, nil
|
2017-01-06 21:30:43 +00:00
|
|
|
}
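// Illustrative sketch (not part of the original source): how the master key
// relates to the shares that unsealPart recombines. Assumes the
// hashicorp/vault/shamir package exposes Split alongside the Combine used
// above; the 5/3 split and variable names are hypothetical.
func exampleShamirRoundTrip(masterKey []byte) ([]byte, error) {
	// Split the master key into 5 shares, any 3 of which can reconstruct it.
	shares, err := shamir.Split(masterKey, 5, 3)
	if err != nil {
		return nil, err
	}
	// Combining any subset that meets the threshold yields the original key.
	return shamir.Combine(shares[:3])
}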
|
|
|
|
|
2017-11-07 20:15:39 +00:00
|
|
|
// unsealInternal takes in the master key and attempts to unseal the barrier.
|
|
|
|
// N.B.: This must be called with the state write lock held.
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
|
2017-02-17 01:13:19 +00:00
|
|
|
defer memzero(masterKey)
|
|
|
|
|
2015-03-11 18:43:36 +00:00
|
|
|
// Attempt to unlock
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.barrier.Unseal(ctx, masterKey); err != nil {
|
2015-03-11 18:43:36 +00:00
|
|
|
return false, err
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsInfo() {
|
|
|
|
c.logger.Info("core: vault is unsealed")
|
|
|
|
}
|
2015-03-11 18:43:36 +00:00
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
// Do post-unseal setup if HA is not enabled
|
|
|
|
if c.ha == nil {
|
2016-08-15 13:42:42 +00:00
|
|
|
// We still need to set up cluster info even if it's not part of a
|
2017-01-04 21:44:03 +00:00
|
|
|
// cluster right now. This also populates the cached cluster object.
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupCluster(ctx); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: cluster setup failed", "error", err)
|
2016-08-15 13:42:42 +00:00
|
|
|
c.barrier.Seal()
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Warn("core: vault is sealed")
|
2016-08-15 13:42:42 +00:00
|
|
|
return false, err
|
|
|
|
}
|
2017-02-17 01:13:19 +00:00
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
if err := c.postUnseal(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: post-unseal setup failed", "error", err)
|
2015-04-14 21:06:15 +00:00
|
|
|
c.barrier.Seal()
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Warn("core: vault is sealed")
|
2015-04-14 21:06:15 +00:00
|
|
|
return false, err
|
|
|
|
}
|
2017-02-17 01:13:19 +00:00
|
|
|
|
2015-12-17 18:48:08 +00:00
|
|
|
c.standby = false
|
2015-04-14 21:06:15 +00:00
|
|
|
} else {
|
|
|
|
// Go to standby mode, wait until we are active to unseal
|
|
|
|
c.standbyDoneCh = make(chan struct{})
|
2016-02-29 02:35:32 +00:00
|
|
|
c.manualStepDownCh = make(chan struct{})
|
2018-03-07 02:35:58 +00:00
|
|
|
c.standbyStopCh = make(chan struct{})
|
|
|
|
go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh)
|
2015-03-11 22:19:41 +00:00
|
|
|
}
|
|
|
|
|
2015-03-11 18:43:36 +00:00
|
|
|
// Success!
|
|
|
|
c.sealed = false
|
2017-12-19 21:06:48 +00:00
|
|
|
|
|
|
|
// Force a cache bust here, which will also run migration code
|
2018-01-19 08:44:06 +00:00
|
|
|
if c.seal.RecoveryKeySupported() {
|
2018-01-19 06:44:44 +00:00
|
|
|
c.seal.SetRecoveryConfig(ctx, nil)
|
2017-12-19 21:06:48 +00:00
|
|
|
}
|
|
|
|
|
2016-04-23 02:55:17 +00:00
|
|
|
if c.ha != nil {
|
|
|
|
sd, ok := c.ha.(physical.ServiceDiscovery)
|
|
|
|
if ok {
|
2016-04-28 17:56:41 +00:00
|
|
|
if err := sd.NotifySealedStateChange(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("core: failed to notify unsealed status", "error", err)
|
|
|
|
}
|
2016-04-28 17:56:41 +00:00
|
|
|
}
|
2016-04-23 02:55:17 +00:00
|
|
|
}
|
|
|
|
}
|
2015-03-11 18:43:36 +00:00
|
|
|
return true, nil
|
2015-03-09 23:33:27 +00:00
|
|
|
}
|
2015-03-10 00:45:34 +00:00
|
|
|
|
2016-05-20 17:03:54 +00:00
|
|
|
// SealWithRequest takes in a logical.Request, acquires the lock, and passes
|
|
|
|
// through to sealInternal
|
|
|
|
func (c *Core) SealWithRequest(req *logical.Request) error {
|
|
|
|
defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
c.stateLock.RLock()
|
2016-05-20 17:03:54 +00:00
|
|
|
|
|
|
|
if c.sealed {
|
2017-08-04 20:42:51 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-20 17:03:54 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
// This will unlock the read lock
|
2018-01-19 06:44:44 +00:00
|
|
|
// We use background context since we may not be active
|
|
|
|
return c.sealInitCommon(context.Background(), req)
|
2016-05-20 17:03:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Seal takes in a token and creates a logical.Request, acquires the lock, and
|
|
|
|
// passes through to sealInternal
|
|
|
|
func (c *Core) Seal(token string) error {
|
2015-04-08 23:43:17 +00:00
|
|
|
defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
|
2016-02-27 00:43:55 +00:00
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
c.stateLock.RLock()
|
2016-05-16 20:11:33 +00:00
|
|
|
|
2015-03-10 00:45:34 +00:00
|
|
|
if c.sealed {
|
2017-08-04 20:42:51 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-20 17:03:54 +00:00
|
|
|
return nil
|
2015-03-10 00:45:34 +00:00
|
|
|
}
|
2015-03-31 16:59:02 +00:00
|
|
|
|
2016-01-07 20:10:05 +00:00
|
|
|
req := &logical.Request{
|
|
|
|
Operation: logical.UpdateOperation,
|
|
|
|
Path: "sys/seal",
|
|
|
|
ClientToken: token,
|
|
|
|
}
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
// This will unlock the read lock
|
2018-01-19 06:44:44 +00:00
|
|
|
// We use background context since we may not be active
|
|
|
|
return c.sealInitCommon(context.Background(), req)
|
2016-05-20 17:03:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// sealInitCommon is common logic for Seal and SealWithRequest and is used to
|
|
|
|
// re-seal the Vault. This requires the Vault to be unsealed again to perform
|
2017-08-04 20:42:51 +00:00
|
|
|
// any further operations. Note: this function will read-unlock the state lock.
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr error) {
|
2016-05-20 17:03:54 +00:00
|
|
|
defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())
|
|
|
|
|
|
|
|
if req == nil {
|
|
|
|
retErr = multierror.Append(retErr, errors.New("nil request to seal"))
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-20 17:03:54 +00:00
|
|
|
return retErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate the token is a root token
|
2017-10-23 20:03:36 +00:00
|
|
|
acl, te, entity, err := c.fetchACLTokenEntryAndEntity(req.ClientToken)
|
2015-03-31 16:59:02 +00:00
|
|
|
if err != nil {
|
2016-02-03 16:35:47 +00:00
|
|
|
// Since there is no token store in standby nodes, sealing cannot
|
|
|
|
// be done. Ideally, the request should be forwarded to the leader node
|
|
|
|
// for validation and the operation should be performed. But for now,
|
|
|
|
// just returning with an error and recommending a vault restart, which
|
|
|
|
// essentially does the same thing.
|
2016-02-03 15:58:33 +00:00
|
|
|
if c.standby {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: vault cannot seal when in standby mode; please restart instead")
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-02-03 15:58:33 +00:00
|
|
|
}
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, err)
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2015-03-31 16:59:02 +00:00
|
|
|
}
|
2016-05-20 17:03:54 +00:00
|
|
|
|
|
|
|
// Audit-log the request before going any further
|
|
|
|
auth := &logical.Auth{
|
|
|
|
ClientToken: req.ClientToken,
|
|
|
|
Policies: te.Policies,
|
|
|
|
Metadata: te.Meta,
|
|
|
|
DisplayName: te.DisplayName,
|
2017-10-18 17:23:05 +00:00
|
|
|
EntityID: te.EntityID,
|
2016-05-20 17:03:54 +00:00
|
|
|
}
|
|
|
|
|
2018-03-02 17:18:39 +00:00
|
|
|
logInput := &audit.LogInput{
|
|
|
|
Auth: auth,
|
|
|
|
Request: req,
|
|
|
|
}
|
|
|
|
if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
|
2016-05-20 17:03:54 +00:00
|
|
|
retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-20 17:03:54 +00:00
|
|
|
return retErr
|
|
|
|
}
|
|
|
|
|
2016-02-29 02:35:32 +00:00
|
|
|
// Attempt to use the token (decrement num_uses)
|
2016-05-02 07:11:14 +00:00
|
|
|
// On error bail out; if the token has been revoked, bail out too
|
2016-02-29 02:35:32 +00:00
|
|
|
if te != nil {
|
2018-01-19 06:44:44 +00:00
|
|
|
te, err = c.tokenStore.UseToken(ctx, te)
|
2016-05-02 07:11:14 +00:00
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to use token", "error", err)
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, ErrInternalError)
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-05-02 07:11:14 +00:00
|
|
|
}
|
|
|
|
if te == nil {
|
|
|
|
// Token is no longer valid
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-05-02 07:11:14 +00:00
|
|
|
}
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
2015-03-31 16:59:02 +00:00
|
|
|
|
2016-01-07 20:10:05 +00:00
|
|
|
// Verify that this operation is allowed
|
2018-01-19 07:43:36 +00:00
|
|
|
authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
|
2017-10-23 20:03:36 +00:00
|
|
|
RootPrivsRequired: true,
|
|
|
|
})
|
|
|
|
if authResults.Error.ErrorOrNil() != nil {
|
|
|
|
retErr = multierror.Append(retErr, authResults.Error)
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-01-07 20:10:05 +00:00
|
|
|
}
|
2017-10-23 20:03:36 +00:00
|
|
|
if !authResults.Allowed {
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
|
2017-08-04 20:49:31 +00:00
|
|
|
c.stateLock.RUnlock()
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-01-07 20:10:05 +00:00
|
|
|
}
|
|
|
|
|
2017-11-02 13:47:02 +00:00
|
|
|
if te != nil && te.NumUses == -1 {
|
|
|
|
// Token needs to be revoked. We do this immediately here because
|
|
|
|
// we won't have a token store after sealing.
|
2018-01-19 06:44:44 +00:00
|
|
|
err = c.tokenStore.Revoke(c.activeContext, te.ID)
|
2017-11-02 13:47:02 +00:00
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("core: token needed revocation before seal but failed to revoke", "error", err)
|
|
|
|
retErr = multierror.Append(retErr, ErrInternalError)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
// Tell any requests that know about this to stop
|
2018-01-19 06:44:44 +00:00
|
|
|
if c.activeContextCancelFunc != nil {
|
|
|
|
c.activeContextCancelFunc()
|
2017-08-04 20:42:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Unlock from the request handling
|
|
|
|
c.stateLock.RUnlock()
|
|
|
|
|
2016-02-29 02:35:32 +00:00
|
|
|
// Seal the Vault
|
2018-03-06 23:06:09 +00:00
|
|
|
c.stateLock.Lock()
|
|
|
|
defer c.stateLock.Unlock()
|
|
|
|
sealErr := c.sealInternal(false)
|
2017-08-04 20:42:51 +00:00
|
|
|
|
2018-03-06 23:06:09 +00:00
|
|
|
if sealErr != nil {
|
|
|
|
retErr = multierror.Append(retErr, sealErr)
|
2015-08-20 17:37:42 +00:00
|
|
|
}
|
|
|
|
|
2018-03-06 23:06:09 +00:00
|
|
|
return
|
2015-06-18 01:23:59 +00:00
|
|
|
}
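// Illustrative sketch (not part of the original source): why sealInitCommon
// read-unlocks before sealing. A sync.RWMutex cannot be upgraded in place, so
// the read lock taken for request handling is released first and the write
// lock is acquired separately; the function and parameters are hypothetical.
func exampleUpgradeToWriteLock(mu *sync.RWMutex, sealed *bool) {
	mu.RLock()
	if *sealed {
		mu.RUnlock()
		return
	}
	mu.RUnlock() // must release the read lock before taking the write lock
	mu.Lock()
	defer mu.Unlock()
	// State can change between the two locks, so re-check under the write lock.
	if *sealed {
		return
	}
	*sealed = true
}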
|
|
|
|
|
2016-02-29 02:35:32 +00:00
|
|
|
// StepDown is used to step down from leadership
|
2016-05-20 17:03:54 +00:00
|
|
|
func (c *Core) StepDown(req *logical.Request) (retErr error) {
|
2016-02-29 02:35:32 +00:00
|
|
|
defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now())
|
|
|
|
|
2016-05-20 17:03:54 +00:00
|
|
|
if req == nil {
|
|
|
|
retErr = multierror.Append(retErr, errors.New("nil request to step-down"))
|
|
|
|
return retErr
|
|
|
|
}
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
c.stateLock.RLock()
|
|
|
|
defer c.stateLock.RUnlock()
|
2016-02-29 02:35:32 +00:00
|
|
|
if c.sealed {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if c.ha == nil || c.standby {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
ctx := c.activeContext
|
|
|
|
|
2017-10-23 20:03:36 +00:00
|
|
|
acl, te, entity, err := c.fetchACLTokenEntryAndEntity(req.ClientToken)
|
2016-02-29 02:35:32 +00:00
|
|
|
if err != nil {
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, err)
|
|
|
|
return retErr
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
2016-05-20 17:03:54 +00:00
|
|
|
|
|
|
|
// Audit-log the request before going any further
|
|
|
|
auth := &logical.Auth{
|
|
|
|
ClientToken: req.ClientToken,
|
|
|
|
Policies: te.Policies,
|
|
|
|
Metadata: te.Meta,
|
|
|
|
DisplayName: te.DisplayName,
|
2017-10-18 17:23:05 +00:00
|
|
|
EntityID: te.EntityID,
|
2016-05-20 17:03:54 +00:00
|
|
|
}
|
|
|
|
|
2018-03-02 17:18:39 +00:00
|
|
|
logInput := &audit.LogInput{
|
|
|
|
Auth: auth,
|
|
|
|
Request: req,
|
|
|
|
}
|
|
|
|
if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
|
2016-05-20 17:03:54 +00:00
|
|
|
retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
|
|
|
|
return retErr
|
|
|
|
}
|
|
|
|
|
2016-02-29 02:35:32 +00:00
|
|
|
// Attempt to use the token (decrement num_uses)
|
|
|
|
if te != nil {
|
2018-01-19 06:44:44 +00:00
|
|
|
te, err = c.tokenStore.UseToken(ctx, te)
|
2016-05-02 07:11:14 +00:00
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to use token", "error", err)
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, ErrInternalError)
|
|
|
|
return retErr
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
2016-05-02 07:11:14 +00:00
|
|
|
if te == nil {
|
|
|
|
// Token has been revoked
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
|
|
|
|
return retErr
|
2016-05-02 07:11:14 +00:00
|
|
|
}
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify that this operation is allowed
|
2018-01-19 07:43:36 +00:00
|
|
|
authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
|
2017-10-23 20:03:36 +00:00
|
|
|
RootPrivsRequired: true,
|
|
|
|
})
|
|
|
|
if authResults.Error.ErrorOrNil() != nil {
|
|
|
|
retErr = multierror.Append(retErr, authResults.Error)
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
2017-10-23 20:03:36 +00:00
|
|
|
if !authResults.Allowed {
|
2016-05-16 20:11:33 +00:00
|
|
|
retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
|
|
|
|
return retErr
|
2016-02-29 02:35:32 +00:00
|
|
|
}
|
|
|
|
|
2017-11-02 13:47:02 +00:00
|
|
|
if te != nil && te.NumUses == -1 {
|
|
|
|
// Token needs to be revoked. We do this immediately here because
|
|
|
|
// we won't have a token store after sealing.
|
2018-01-19 06:44:44 +00:00
|
|
|
err = c.tokenStore.Revoke(c.activeContext, te.ID)
|
2017-11-02 13:47:02 +00:00
|
|
|
if err != nil {
|
|
|
|
c.logger.Error("core: token needed revocation before step-down but failed to revoke", "error", err)
|
|
|
|
retErr = multierror.Append(retErr, ErrInternalError)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-03 17:29:30 +00:00
|
|
|
select {
|
|
|
|
case c.manualStepDownCh <- struct{}{}:
|
|
|
|
default:
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Warn("core: manual step-down operation already queued")
|
2016-03-03 17:29:30 +00:00
|
|
|
}
|
2016-02-29 02:35:32 +00:00
|
|
|
|
2016-05-16 20:11:33 +00:00
|
|
|
return retErr
|
2016-02-29 02:35:32 +00:00
|
|
|
}
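// Illustrative sketch (not part of the original source): the non-blocking send
// used at the end of StepDown. A select with a default branch queues at most
// one pending step-down rather than blocking the request; the helper name is
// hypothetical.
func exampleQueueStepDown(stepDownCh chan struct{}) bool {
	select {
	case stepDownCh <- struct{}{}:
		return true // step-down queued
	default:
		return false // a step-down is already pending
	}
}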
|
|
|
|
|
2016-02-27 00:43:55 +00:00
|
|
|
// sealInternal is an internal method used to seal the vault. It does not do
|
|
|
|
// any authorization checking. The stateLock must be held prior to calling.
|
2018-03-06 23:06:09 +00:00
|
|
|
func (c *Core) sealInternal(keepLock bool) error {
|
2017-08-04 20:42:51 +00:00
|
|
|
if c.sealed {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Mark that we are sealed to prevent further transactions
|
2015-03-10 00:45:34 +00:00
|
|
|
c.sealed = true
|
2015-03-13 18:16:24 +00:00
|
|
|
|
2017-02-28 23:17:19 +00:00
|
|
|
c.logger.Debug("core: marked as sealed")
|
|
|
|
|
2017-03-01 23:16:47 +00:00
|
|
|
// Clear forwarding clients
|
|
|
|
c.requestForwardingConnectionLock.Lock()
|
|
|
|
c.clearForwardingClients()
|
|
|
|
c.requestForwardingConnectionLock.Unlock()
|
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
// Do pre-seal teardown if HA is not enabled
|
|
|
|
if c.ha == nil {
|
2017-01-11 16:13:09 +00:00
|
|
|
// Even in a non-HA context we key off of this for some things
|
|
|
|
c.standby = true
|
2015-04-14 21:06:15 +00:00
|
|
|
if err := c.preSeal(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: pre-seal teardown failed", "error", err)
|
2015-04-14 21:06:15 +00:00
|
|
|
return fmt.Errorf("internal error")
|
|
|
|
}
|
|
|
|
} else {
|
2018-03-07 02:35:58 +00:00
|
|
|
if keepLock {
|
|
|
|
atomic.StoreUint32(&c.keepHALockOnStepDown, 1)
|
|
|
|
}
|
2018-03-06 23:06:09 +00:00
|
|
|
// If we are trying to acquire the lock, force it to return with nil so
|
|
|
|
// runStandby will exit
|
|
|
|
// If we are active, signal the standby goroutine to shut down and wait
|
|
|
|
// for completion. We have the state lock here so nothing else should
|
|
|
|
// be toggling standby status.
|
2018-03-07 02:35:58 +00:00
|
|
|
close(c.standbyStopCh)
|
|
|
|
c.logger.Trace("core: finished triggering standbyStopCh for runStandby")
|
2018-03-06 23:06:09 +00:00
|
|
|
|
2018-03-07 02:35:58 +00:00
|
|
|
// Wait for runStandby to stop
|
|
|
|
<-c.standbyDoneCh
|
|
|
|
atomic.StoreUint32(&c.keepHALockOnStepDown, 0)
|
|
|
|
c.logger.Trace("core: runStandby done")
|
2015-03-13 18:16:24 +00:00
|
|
|
}
|
|
|
|
|
2017-02-28 23:17:19 +00:00
|
|
|
c.logger.Debug("core: sealing barrier")
|
2015-03-13 18:34:40 +00:00
|
|
|
if err := c.barrier.Seal(); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
c.logger.Error("core: error sealing barrier", "error", err)
|
2015-03-13 18:34:40 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-02-27 00:43:55 +00:00
|
|
|
|
2016-04-23 02:55:17 +00:00
|
|
|
if c.ha != nil {
|
|
|
|
sd, ok := c.ha.(physical.ServiceDiscovery)
|
|
|
|
if ok {
|
2016-04-28 17:56:41 +00:00
|
|
|
if err := sd.NotifySealedStateChange(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("core: failed to notify sealed status", "error", err)
|
|
|
|
}
|
2016-04-28 17:56:41 +00:00
|
|
|
}
|
2016-04-23 02:55:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-28 23:17:19 +00:00
|
|
|
c.logger.Info("core: vault is sealed")
|
|
|
|
|
2015-03-13 18:34:40 +00:00
|
|
|
return nil
|
2015-03-10 00:45:34 +00:00
|
|
|
}
|
2015-03-11 22:19:41 +00:00
|
|
|
|
|
|
|
// postUnseal is invoked after the barrier is unsealed, but before
|
|
|
|
// allowing any user operations. This allows us to set up any state that
|
|
|
|
// requires the Vault to be unsealed such as mount tables, logical backends,
|
|
|
|
// credential stores, etc.
|
2015-11-02 16:01:00 +00:00
|
|
|
func (c *Core) postUnseal() (retErr error) {
|
2015-04-08 23:43:17 +00:00
|
|
|
defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())
|
2018-01-19 06:44:44 +00:00
|
|
|
|
|
|
|
// Create a new request context
|
|
|
|
c.activeContext, c.activeContextCancelFunc = context.WithCancel(context.Background())
|
|
|
|
|
2015-11-02 16:01:00 +00:00
|
|
|
defer func() {
|
|
|
|
if retErr != nil {
|
2018-01-19 06:44:44 +00:00
|
|
|
c.activeContextCancelFunc()
|
2015-11-02 16:01:00 +00:00
|
|
|
c.preSeal()
|
|
|
|
}
|
|
|
|
}()
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: post-unseal setup starting")
|
2017-01-06 20:42:18 +00:00
|
|
|
|
2017-03-01 23:16:47 +00:00
|
|
|
// Clear forwarding clients; we're active
|
|
|
|
c.requestForwardingConnectionLock.Lock()
|
|
|
|
c.clearForwardingClients()
|
|
|
|
c.requestForwardingConnectionLock.Unlock()
|
|
|
|
|
2018-01-26 03:21:51 +00:00
|
|
|
c.physicalCache.Purge(c.activeContext)
|
|
|
|
if !c.cachingDisabled {
|
|
|
|
c.physicalCache.SetEnabled(true)
|
2015-04-14 18:08:04 +00:00
|
|
|
}
|
2017-02-17 01:13:19 +00:00
|
|
|
|
2018-02-09 21:37:40 +00:00
|
|
|
switch c.sealUnwrapper.(type) {
|
|
|
|
case *sealUnwrapper:
|
|
|
|
c.sealUnwrapper.(*sealUnwrapper).runUnwraps()
|
|
|
|
case *transactionalSealUnwrapper:
|
|
|
|
c.sealUnwrapper.(*transactionalSealUnwrapper).runUnwraps()
|
|
|
|
}
|
|
|
|
|
2017-02-28 23:36:28 +00:00
|
|
|
// Purge these for safety in case of a rekey
|
2018-01-19 06:44:44 +00:00
|
|
|
c.seal.SetBarrierConfig(c.activeContext, nil)
|
2018-01-19 08:44:06 +00:00
|
|
|
if c.seal.RecoveryKeySupported() {
|
2018-01-19 06:44:44 +00:00
|
|
|
c.seal.SetRecoveryConfig(c.activeContext, nil)
|
2017-02-28 23:36:28 +00:00
|
|
|
}
|
|
|
|
|
2017-02-17 01:13:19 +00:00
|
|
|
if err := enterprisePostUnseal(c); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.ensureWrappingKey(c.activeContext); err != nil {
|
2017-01-04 21:44:03 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-08-16 02:10:32 +00:00
|
|
|
if err := c.setupPluginCatalog(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.loadMounts(c.activeContext); err != nil {
|
2015-03-11 22:19:41 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupMounts(c.activeContext); err != nil {
|
2015-03-11 22:50:27 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupPolicyStore(c.activeContext); err != nil {
|
2015-11-02 16:01:00 +00:00
|
|
|
return err
|
2015-03-18 21:00:42 +00:00
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.loadCORSConfig(c.activeContext); err != nil {
|
2017-06-17 04:04:55 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.loadCredentials(c.activeContext); err != nil {
|
2015-11-02 16:01:00 +00:00
|
|
|
return err
|
2015-03-18 22:46:07 +00:00
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupCredentials(c.activeContext); err != nil {
|
2015-11-02 16:01:00 +00:00
|
|
|
return err
|
2015-03-18 22:30:31 +00:00
|
|
|
}
|
2017-09-01 05:02:03 +00:00
|
|
|
if err := c.startRollback(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-03-24 01:00:14 +00:00
|
|
|
if err := c.setupExpiration(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.loadAudits(c.activeContext); err != nil {
|
2015-03-27 21:00:38 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupAudits(c.activeContext); err != nil {
|
2015-03-27 21:00:38 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.loadIdentityStoreArtifacts(c.activeContext); err != nil {
|
2017-10-23 20:03:36 +00:00
|
|
|
return err
|
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupAuditedHeadersConfig(c.activeContext); err != nil {
|
2017-02-02 19:49:20 +00:00
|
|
|
return err
|
|
|
|
}
|
2017-04-04 00:52:29 +00:00
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
if c.ha != nil {
|
2018-01-19 09:11:59 +00:00
|
|
|
if err := c.startClusterListener(c.activeContext); err != nil {
|
2016-08-15 13:42:42 +00:00
|
|
|
return err
|
|
|
|
}
|
2016-07-26 06:25:33 +00:00
|
|
|
}
|
2015-04-08 23:43:17 +00:00
|
|
|
c.metricsCh = make(chan struct{})
|
|
|
|
go c.emitMetrics(c.metricsCh)
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: post-unseal setup complete")
|
2015-03-11 22:19:41 +00:00
|
|
|
return nil
|
|
|
|
}
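// Illustrative sketch (not part of the original source): the request-context
// pattern used above. postUnseal creates a cancellable context that later
// work derives from, and sealing or step-down invokes the stored cancel
// function so in-flight requests observe ctx.Done(); the names below are
// hypothetical.
func exampleRequestContext(work func(ctx context.Context)) (stop context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	// work should return promptly once ctx.Done() is closed.
	go work(ctx)
	return cancel
}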
|
2015-03-13 18:16:24 +00:00
|
|
|
|
|
|
|
// preSeal is invoked before the barrier is sealed, allowing
|
|
|
|
// for any state teardown required.
|
2015-11-02 18:29:18 +00:00
|
|
|
func (c *Core) preSeal() error {
|
2015-04-08 23:43:17 +00:00
|
|
|
defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: pre-seal teardown starting")
|
2015-05-28 19:07:52 +00:00
|
|
|
|
|
|
|
// Clear any rekey progress
|
2016-04-04 14:44:22 +00:00
|
|
|
c.barrierRekeyConfig = nil
|
|
|
|
c.barrierRekeyProgress = nil
|
|
|
|
c.recoveryRekeyConfig = nil
|
|
|
|
c.recoveryRekeyProgress = nil
|
2015-05-28 19:07:52 +00:00
|
|
|
|
2015-04-08 23:43:17 +00:00
|
|
|
if c.metricsCh != nil {
|
|
|
|
close(c.metricsCh)
|
|
|
|
c.metricsCh = nil
|
|
|
|
}
|
2015-11-02 18:29:18 +00:00
|
|
|
var result error
|
2017-02-17 01:13:19 +00:00
|
|
|
|
2017-03-02 01:57:38 +00:00
|
|
|
c.stopClusterListener()
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2015-03-27 21:00:38 +00:00
|
|
|
if err := c.teardownAudits(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
|
2015-03-27 21:00:38 +00:00
|
|
|
}
|
2015-03-24 01:00:14 +00:00
|
|
|
if err := c.stopExpiration(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
|
2015-03-24 01:00:14 +00:00
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.teardownCredentials(c.activeContext); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
|
2015-03-18 22:30:31 +00:00
|
|
|
}
|
2015-03-18 21:00:42 +00:00
|
|
|
if err := c.teardownPolicyStore(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
|
2015-03-18 21:00:42 +00:00
|
|
|
}
|
2015-03-17 23:23:58 +00:00
|
|
|
if err := c.stopRollback(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
|
2015-03-17 23:23:58 +00:00
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.unloadMounts(c.activeContext); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
|
2015-03-13 18:16:24 +00:00
|
|
|
}
|
2017-02-17 01:13:19 +00:00
|
|
|
if err := enterprisePreSeal(c); err != nil {
|
|
|
|
result = multierror.Append(result, err)
|
|
|
|
}
|
|
|
|
|
2018-02-09 21:37:40 +00:00
|
|
|
switch c.sealUnwrapper.(type) {
|
|
|
|
case *sealUnwrapper:
|
|
|
|
c.sealUnwrapper.(*sealUnwrapper).stopUnwraps()
|
|
|
|
case *transactionalSealUnwrapper:
|
|
|
|
c.sealUnwrapper.(*transactionalSealUnwrapper).stopUnwraps()
|
|
|
|
}
|
|
|
|
|
2018-01-26 03:21:51 +00:00
|
|
|
// Purge the cache
|
|
|
|
c.physicalCache.SetEnabled(false)
|
|
|
|
c.physicalCache.Purge(c.activeContext)
|
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: pre-seal teardown complete")
|
2015-11-02 18:29:18 +00:00
|
|
|
return result
|
2015-03-13 18:16:24 +00:00
|
|
|
}
|
2015-04-08 23:43:17 +00:00
|
|
|
|
2017-02-17 01:13:19 +00:00
|
|
|
func enterprisePostUnsealImpl(c *Core) error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func enterprisePreSealImpl(c *Core) error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func startReplicationImpl(c *Core) error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func stopReplicationImpl(c *Core) error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-04-14 23:11:39 +00:00
|
|
|
// runStandby is a long-running routine that is used when an HA backend
|
2015-04-14 21:06:15 +00:00
|
|
|
// is enabled. It waits until we are leader and switches this Vault to
|
|
|
|
// active.
|
2018-03-07 02:35:58 +00:00
|
|
|
func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) {
|
2015-04-14 21:06:15 +00:00
|
|
|
defer close(doneCh)
|
2016-02-29 02:35:32 +00:00
|
|
|
defer close(manualStepDownCh)
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: entering standby mode")
|
2015-05-28 23:11:31 +00:00
|
|
|
|
|
|
|
// Monitor for key rotation
|
|
|
|
keyRotateDone := make(chan struct{})
|
|
|
|
keyRotateStop := make(chan struct{})
|
2018-01-19 06:44:44 +00:00
|
|
|
go c.periodicCheckKeyUpgrade(context.Background(), keyRotateDone, keyRotateStop)
|
2017-05-24 19:06:56 +00:00
|
|
|
// Monitor for new leadership
|
|
|
|
checkLeaderDone := make(chan struct{})
|
|
|
|
checkLeaderStop := make(chan struct{})
|
|
|
|
go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop)
|
2015-05-28 23:11:31 +00:00
|
|
|
defer func() {
|
2018-03-06 23:06:09 +00:00
|
|
|
c.logger.Trace("core: closed periodic key rotation checker stop channel")
|
2015-05-28 23:11:31 +00:00
|
|
|
close(keyRotateStop)
|
|
|
|
<-keyRotateDone
|
2017-05-24 19:06:56 +00:00
|
|
|
close(checkLeaderStop)
|
2018-03-06 23:06:09 +00:00
|
|
|
c.logger.Trace("core: closed periodic leader refresh stop channel")
|
2017-05-24 19:06:56 +00:00
|
|
|
<-checkLeaderDone
|
2018-03-06 23:06:09 +00:00
|
|
|
c.logger.Trace("core: periodic leader refresh returned")
|
2015-05-28 23:11:31 +00:00
|
|
|
}()
|
|
|
|
|
2018-03-06 23:06:09 +00:00
|
|
|
var manualStepDown bool
|
2015-04-14 21:06:15 +00:00
|
|
|
for {
|
|
|
|
// Check for a shutdown
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
2018-03-06 23:06:09 +00:00
|
|
|
c.logger.Trace("core: stop channel triggered in runStandby")
|
2015-04-14 21:06:15 +00:00
|
|
|
return
|
|
|
|
default:
|
2018-03-06 23:06:09 +00:00
|
|
|
// If we've just stepped down, we could instantly grab the lock again. Give
|
|
|
|
// the other nodes a chance.
|
|
|
|
if manualStepDown {
|
|
|
|
time.Sleep(manualStepDownSleepPeriod)
|
|
|
|
manualStepDown = false
|
|
|
|
}
|
2015-04-14 21:06:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a lock
|
2016-01-13 18:40:08 +00:00
|
|
|
uuid, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to generate uuid", "error", err)
|
2016-01-13 18:40:08 +00:00
|
|
|
return
|
|
|
|
}
|
2015-04-14 23:44:48 +00:00
|
|
|
lock, err := c.ha.LockWith(coreLockPath, uuid)
|
2015-04-14 21:06:15 +00:00
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to create lock", "error", err)
|
2015-04-14 21:06:15 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Attempt the acquisition
|
2018-03-07 02:35:58 +00:00
|
|
|
leaderLostCh := c.acquireLock(lock, stopCh)
|
2015-04-14 21:06:15 +00:00
|
|
|
|
|
|
|
// Bail if we are being shutdown
|
2015-10-08 18:34:10 +00:00
|
|
|
if leaderLostCh == nil {
|
2015-04-14 21:06:15 +00:00
|
|
|
return
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Info("core: acquired lock, enabling active operation")
|
2015-04-14 21:06:15 +00:00
|
|
|
|
2016-07-18 17:38:44 +00:00
|
|
|
// This is used later to log a metrics event; this can be helpful to
|
|
|
|
// detect flapping
|
|
|
|
activeTime := time.Now()
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// Grab the lock as we need it for cluster setup, which needs to happen
|
2017-02-28 23:17:19 +00:00
|
|
|
// before advertising.
|
2018-03-07 02:35:58 +00:00
|
|
|
|
|
|
|
lockGrabbedCh := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
// Grab the lock
|
|
|
|
c.stateLock.Lock()
|
|
|
|
// If stopCh has been closed, which only happens while the
|
|
|
|
// stateLock is held, we have actually terminated, so we just
|
|
|
|
// instantly give up the lock, otherwise we notify that it's ready
|
|
|
|
// for consumption
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
c.stateLock.Unlock()
|
|
|
|
default:
|
|
|
|
close(lockGrabbedCh)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
lock.Unlock()
|
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
|
|
|
return
|
|
|
|
case <-lockGrabbedCh:
|
|
|
|
// We now have the lock and can use it
|
|
|
|
}
|
2017-02-28 23:17:19 +00:00
|
|
|
|
2018-03-06 23:06:09 +00:00
|
|
|
if c.sealed {
|
|
|
|
c.logger.Warn("core: grabbed HA lock but already sealed, exiting")
|
|
|
|
lock.Unlock()
|
|
|
|
c.stateLock.Unlock()
|
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Store the lock so that we can manually clear it later if needed
|
|
|
|
c.heldHALock = lock
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
// We haven't run postUnseal yet so we have nothing meaningful to use here
|
|
|
|
ctx := context.Background()
|
|
|
|
|
2017-02-28 23:17:19 +00:00
|
|
|
// This block is used to wipe barrier/seal state and verify that
|
|
|
|
// everything is sane. If we have no sanity in the barrier, we actually
|
|
|
|
// seal, as there's little we can do.
|
|
|
|
{
|
2018-01-19 06:44:44 +00:00
|
|
|
c.seal.SetBarrierConfig(ctx, nil)
|
2018-01-19 08:44:06 +00:00
|
|
|
if c.seal.RecoveryKeySupported() {
|
2018-01-19 06:44:44 +00:00
|
|
|
c.seal.SetRecoveryConfig(ctx, nil)
|
2017-02-28 23:17:19 +00:00
|
|
|
}
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.performKeyUpgrades(ctx); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
// We call this in a goroutine so that we can give up the
|
|
|
|
// statelock and have this shut us down; sealInternal has a
|
2017-03-03 20:00:46 +00:00
|
|
|
// workflow where it watches for the stopCh to close so we want
|
2017-02-28 23:17:19 +00:00
|
|
|
// to return from here
|
|
|
|
c.logger.Error("core: error performing key upgrades", "error", err)
|
2018-03-06 23:06:09 +00:00
|
|
|
go c.Shutdown()
|
|
|
|
c.heldHALock = nil
|
2017-02-28 23:17:19 +00:00
|
|
|
lock.Unlock()
|
2018-03-06 23:06:09 +00:00
|
|
|
c.stateLock.Unlock()
|
2017-02-28 23:17:19 +00:00
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-02 15:03:49 +00:00
|
|
|
// Clear previous local cluster cert info so we generate new ones. Since the
|
|
|
|
// UUID will have changed, standbys will know to look for new info
|
2018-02-23 19:47:07 +00:00
|
|
|
c.localClusterParsedCert.Store((*x509.Certificate)(nil))
|
|
|
|
c.localClusterCert.Store(([]byte)(nil))
|
|
|
|
c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))
|
2017-03-02 15:03:49 +00:00
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.setupCluster(ctx); err != nil {
|
2018-03-06 23:06:09 +00:00
|
|
|
c.heldHALock = nil
|
|
|
|
lock.Unlock()
|
2016-08-15 13:42:42 +00:00
|
|
|
c.stateLock.Unlock()
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: cluster setup failed", "error", err)
|
2016-08-15 13:42:42 +00:00
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Advertise as leader
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.advertiseLeader(ctx, uuid, leaderLostCh); err != nil {
|
2018-03-06 23:06:09 +00:00
|
|
|
c.heldHALock = nil
|
|
|
|
lock.Unlock()
|
2016-08-15 13:42:42 +00:00
|
|
|
c.stateLock.Unlock()
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: leader advertisement setup failed", "error", err)
|
2016-07-18 17:38:44 +00:00
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
2015-04-14 23:44:48 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
// Attempt the post-unseal process
|
|
|
|
err = c.postUnseal()
|
|
|
|
if err == nil {
|
2015-04-14 23:11:39 +00:00
|
|
|
c.standby = false
|
2015-04-14 21:06:15 +00:00
|
|
|
}
|
2018-03-06 23:06:09 +00:00
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
c.stateLock.Unlock()
|
|
|
|
|
|
|
|
// Handle a failure to unseal
|
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: post-unseal setup failed", "error", err)
|
2015-04-14 21:06:15 +00:00
|
|
|
lock.Unlock()
|
2016-07-18 17:38:44 +00:00
|
|
|
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
|
2015-04-14 21:06:15 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Monitor a loss of leadership
|
2018-03-07 02:35:58 +00:00
|
|
|
releaseHALock := true
|
2018-03-06 23:06:09 +00:00
|
|
|
grabStateLock := true
|
2015-04-14 21:06:15 +00:00
|
|
|
select {
|
2015-10-08 18:34:10 +00:00
|
|
|
case <-leaderLostCh:
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Warn("core: leadership lost, stopping active operation")
|
2018-03-07 02:35:58 +00:00
|
|
|
case <-stopCh:
|
2018-03-06 23:06:09 +00:00
|
|
|
// This case comes from sealInternal; we will already be holding the
|
|
|
|
// state lock, so we toggle grabStateLock to false
|
2018-03-07 02:35:58 +00:00
|
|
|
if atomic.LoadUint32(&c.keepHALockOnStepDown) == 1 {
|
|
|
|
releaseHALock = false
|
|
|
|
}
|
2018-03-06 23:06:09 +00:00
|
|
|
grabStateLock = false
|
2016-02-29 02:35:32 +00:00
|
|
|
case <-manualStepDownCh:
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Warn("core: stepping down from active operation to standby")
|
2016-02-29 02:35:32 +00:00
|
|
|
manualStepDown = true
|
2015-04-14 21:06:15 +00:00
|
|
|
}
|
|
|
|
|
2016-07-18 17:38:44 +00:00
|
|
|
metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime)
|
|
|
|
|
2017-08-04 20:42:51 +00:00
|
|
|
// Tell any requests that know about this to stop
|
2018-01-19 06:44:44 +00:00
|
|
|
if c.activeContextCancelFunc != nil {
|
|
|
|
c.activeContextCancelFunc()
|
2017-08-04 20:42:51 +00:00
|
|
|
}
|
|
|
|
|
2015-04-14 21:06:15 +00:00
|
|
|
// Attempt the pre-seal process
|
2018-03-06 23:06:09 +00:00
|
|
|
if grabStateLock {
|
|
|
|
c.stateLock.Lock()
|
|
|
|
}
|
2015-04-14 23:11:39 +00:00
|
|
|
c.standby = true
|
2015-11-02 16:01:00 +00:00
|
|
|
preSealErr := c.preSeal()
|
2018-03-06 23:06:09 +00:00
|
|
|
if grabStateLock {
|
|
|
|
c.stateLock.Unlock()
|
|
|
|
}
|
2015-04-14 21:06:15 +00:00
|
|
|
|
2018-03-07 02:35:58 +00:00
|
|
|
if releaseHALock {
|
2018-03-06 23:06:09 +00:00
|
|
|
if err := c.clearLeader(uuid); err != nil {
|
|
|
|
c.logger.Error("core: clearing leader advertisement failed", "error", err)
|
|
|
|
}
|
|
|
|
c.heldHALock.Unlock()
|
|
|
|
c.heldHALock = nil
|
|
|
|
}
|
2015-04-14 21:06:15 +00:00
|
|
|
|
|
|
|
// Check for a failure to prepare to seal
|
2015-11-02 16:01:00 +00:00
|
|
|
if preSealErr != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: pre-seal teardown failed", "error", preSealErr)
|
2015-04-14 21:06:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
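// Illustrative sketch (not part of the original source): the lock-handoff
// pattern used above after winning leadership. The state lock is grabbed in a
// goroutine and readiness is signalled over a channel so a concurrent
// shutdown (stopCh) never blocks on the mutex; the helper name is
// hypothetical.
func exampleGrabLockOrStop(mu *sync.Mutex, stopCh <-chan struct{}) bool {
	grabbed := make(chan struct{})
	go func() {
		mu.Lock()
		// If stopCh is already closed we are shutting down, so give the
		// lock straight back; otherwise signal that it is ready for use.
		select {
		case <-stopCh:
			mu.Unlock()
		default:
			close(grabbed)
		}
	}()
	select {
	case <-stopCh:
		return false // shutting down; the goroutine releases the lock
	case <-grabbed:
		return true // caller now holds mu and must Unlock it
	}
}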
|
|
|
|
|
2017-05-24 19:06:56 +00:00
|
|
|
// This checks the leader periodically to ensure that we switch RPC to a new
|
|
|
|
// leader pretty quickly. There is logic in Leader() already to not make this
|
|
|
|
// onerous and avoid more traffic than needed, so we just call that and ignore
|
|
|
|
// the result.
|
|
|
|
func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) {
|
|
|
|
defer close(doneCh)
|
2018-03-07 02:35:58 +00:00
|
|
|
var opCount int32
|
2017-05-24 19:06:56 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-time.After(leaderCheckInterval):
|
2018-03-07 02:35:58 +00:00
|
|
|
count := atomic.AddInt32(&opCount, 1)
|
|
|
|
if count > 1 {
|
|
|
|
atomic.AddInt32(&opCount, -1)
|
|
|
|
continue
|
|
|
|
}
|
2018-03-06 23:06:09 +00:00
|
|
|
// We do this in a goroutine because otherwise if this refresh is
|
|
|
|
// called while we're shutting down the call to Leader() can
|
|
|
|
// deadlock, which then means stopCh can never be seen and we can
|
|
|
|
// block shutdown
|
2018-03-07 02:35:58 +00:00
|
|
|
go func() {
|
|
|
|
defer atomic.AddInt32(&opCount, -1)
|
|
|
|
c.Leader()
|
|
|
|
}()
|
2017-05-24 19:06:56 +00:00
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
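// Illustrative sketch (not part of the original source): the opCount guard
// used above. An atomic counter allows at most one refresh goroutine in
// flight per tick, so a slow Leader() call cannot pile up work; the helper
// name is hypothetical.
func exampleSingleFlight(opCount *int32, work func()) {
	if atomic.AddInt32(opCount, 1) > 1 {
		// Another invocation is still running; skip this tick.
		atomic.AddInt32(opCount, -1)
		return
	}
	go func() {
		defer atomic.AddInt32(opCount, -1)
		work()
	}()
}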
|
|
|
|
|
2015-05-28 23:11:31 +00:00
|
|
|
// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) periodicCheckKeyUpgrade(ctx context.Context, doneCh, stopCh chan struct{}) {
|
2015-05-28 23:11:31 +00:00
|
|
|
defer close(doneCh)
|
2018-03-07 02:35:58 +00:00
|
|
|
var opCount int32
|
2015-05-28 23:11:31 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-time.After(keyRotateCheckInterval):
|
2018-03-07 02:35:58 +00:00
|
|
|
count := atomic.AddInt32(&opCount, 1)
|
|
|
|
if count > 1 {
|
|
|
|
atomic.AddInt32(&opCount, -1)
|
2015-05-28 23:11:31 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-03-07 02:35:58 +00:00
|
|
|
go func() {
|
|
|
|
defer atomic.AddInt32(&opCount, -1)
|
|
|
|
// Only check if we are a standby
|
|
|
|
c.stateLock.RLock()
|
|
|
|
standby := c.standby
|
|
|
|
c.stateLock.RUnlock()
|
|
|
|
if !standby {
|
|
|
|
return
|
|
|
|
}
|
2017-03-04 15:21:13 +00:00
|
|
|
|
2018-03-07 02:35:58 +00:00
|
|
|
// Check for a poison pill. If we can read it, it means we have stale
|
|
|
|
// keys (e.g. from replication being activated) and we need to seal to
|
|
|
|
// be unsealed again.
|
|
|
|
entry, _ := c.barrier.Get(ctx, poisonPillPath)
|
|
|
|
if entry != nil && len(entry.Value) > 0 {
|
|
|
|
c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
|
|
|
|
go c.Shutdown()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := c.checkKeyUpgrades(ctx); err != nil {
|
|
|
|
c.logger.Error("core: key rotation periodic upgrade check failed", "error", err)
|
|
|
|
}
|
|
|
|
}()
|
2015-05-28 23:11:31 +00:00
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkKeyUpgrades is used to check if there have been any key rotations
|
|
|
|
// and if there is a chain of upgrades available
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) checkKeyUpgrades(ctx context.Context) error {
|
2015-05-28 23:11:31 +00:00
|
|
|
for {
|
2015-05-28 23:43:44 +00:00
|
|
|
// Check for an upgrade
|
2018-01-19 06:44:44 +00:00
|
|
|
didUpgrade, newTerm, err := c.barrier.CheckUpgrade(ctx)
|
2015-05-28 23:11:31 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nothing to do if no upgrade
|
2015-05-28 23:43:44 +00:00
|
|
|
if !didUpgrade {
|
2015-05-28 23:11:31 +00:00
|
|
|
break
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsInfo() {
|
|
|
|
c.logger.Info("core: upgraded to new key term", "term", newTerm)
|
|
|
|
}
|
2015-05-28 23:11:31 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-05-28 23:52:06 +00:00
|
|
|
// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
|
|
|
|
// are cleaned up in a timely manner if a leader failover takes place
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error {
|
2015-05-28 23:52:06 +00:00
|
|
|
// List the upgrades
|
2018-01-19 06:44:44 +00:00
|
|
|
upgrades, err := c.barrier.List(ctx, keyringUpgradePrefix)
|
2015-05-28 23:52:06 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to list upgrades: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nothing to do if no upgrades
|
|
|
|
if len(upgrades) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Schedule cleanup for all of them
|
|
|
|
time.AfterFunc(keyRotateGracePeriod, func() {
|
2017-06-05 20:00:56 +00:00
|
|
|
sealed, err := c.barrier.Sealed()
|
|
|
|
if err != nil {
|
|
|
|
c.logger.Warn("core: failed to check barrier status at upgrade cleanup time")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if sealed {
|
|
|
|
c.logger.Warn("core: barrier sealed at upgrade cleanup time")
|
|
|
|
return
|
|
|
|
}
|
2015-05-28 23:52:06 +00:00
|
|
|
for _, upgrade := range upgrades {
|
|
|
|
path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.barrier.Delete(ctx, path); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to cleanup upgrade", "path", path, "error", err)
|
2015-05-28 23:52:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
return nil
|
|
|
|
}
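// Illustrative sketch (not part of the original source): the deferred-cleanup
// pattern used above. time.AfterFunc runs a one-shot callback in its own
// goroutine after the grace period; stopping the returned timer cancels it.
// The helper name and parameters are hypothetical.
func exampleDeferredCleanup(delay time.Duration, cleanup func()) (cancel func() bool) {
	t := time.AfterFunc(delay, func() {
		// Runs once, after the delay elapses.
		cleanup()
	})
	// Stop reports whether the callback was cancelled before it fired.
	return t.Stop
}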
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) performKeyUpgrades(ctx context.Context) error {
|
|
|
|
if err := c.checkKeyUpgrades(ctx); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
return errwrap.Wrapf("error checking for key upgrades: {{err}}", err)
|
|
|
|
}
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.barrier.ReloadMasterKey(ctx); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
return errwrap.Wrapf("error reloading master key: {{err}}", err)
|
|
|
|
}
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.barrier.ReloadKeyring(ctx); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
return errwrap.Wrapf("error reloading keyring: {{err}}", err)
|
|
|
|
}
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
if err := c.scheduleUpgradeCleanup(ctx); err != nil {
|
2017-02-28 23:17:19 +00:00
|
|
|
return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-10-08 18:34:10 +00:00
|
|
|
// acquireLock blocks until the lock is acquired, returning the leaderLostCh
|
2015-04-14 21:06:15 +00:00
|
|
|
func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
|
|
|
|
for {
|
|
|
|
// Attempt lock acquisition
|
2015-10-08 18:34:10 +00:00
|
|
|
leaderLostCh, err := lock.Lock(stopCh)
|
2015-04-14 21:06:15 +00:00
|
|
|
if err == nil {
|
2015-10-08 18:34:10 +00:00
|
|
|
return leaderLostCh
|
2015-04-14 21:06:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Retry the acquisition
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to acquire lock", "error", err)
|
2015-04-14 21:06:15 +00:00
|
|
|
select {
|
|
|
|
case <-time.After(lockRetryInterval):
|
|
|
|
case <-stopCh:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 23:44:48 +00:00
|
|
|
// advertiseLeader is used to advertise the current node as leader
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) error {
|
|
|
|
go c.cleanLeaderPrefix(ctx, uuid, leaderLostCh)
|
2016-08-15 13:42:42 +00:00
|
|
|
|
|
|
|
var key *ecdsa.PrivateKey
|
2018-02-23 19:47:07 +00:00
|
|
|
switch c.localClusterPrivateKey.Load().(type) {
|
2016-08-15 13:42:42 +00:00
|
|
|
case *ecdsa.PrivateKey:
|
2018-02-23 19:47:07 +00:00
|
|
|
key = c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey)
|
2016-08-15 13:42:42 +00:00
|
|
|
default:
|
2018-02-23 19:47:07 +00:00
|
|
|
c.logger.Error("core: unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load()))
|
|
|
|
return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load())
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
2016-08-19 18:49:11 +00:00
|
|
|
keyParams := &clusterKeyParams{
|
2016-08-15 13:42:42 +00:00
|
|
|
Type: corePrivateKeyTypeP521,
|
|
|
|
X: key.X,
|
|
|
|
Y: key.Y,
|
|
|
|
D: key.D,
|
|
|
|
}
|
|
|
|
|
2018-02-23 19:47:07 +00:00
|
|
|
locCert := c.localClusterCert.Load().([]byte)
|
|
|
|
localCert := make([]byte, len(locCert))
|
|
|
|
copy(localCert, locCert)
|
2016-08-15 13:42:42 +00:00
|
|
|
adv := &activeAdvertisement{
|
|
|
|
RedirectAddr: c.redirectAddr,
|
|
|
|
ClusterAddr: c.clusterAddr,
|
2018-02-23 19:47:07 +00:00
|
|
|
ClusterCert: localCert,
|
2016-08-15 13:42:42 +00:00
|
|
|
ClusterKeyParams: keyParams,
|
|
|
|
}
|
|
|
|
val, err := jsonutil.EncodeJSON(adv)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-04-14 23:44:48 +00:00
|
|
|
ent := &Entry{
|
|
|
|
Key: coreLeaderPrefix + uuid,
|
2016-08-15 13:42:42 +00:00
|
|
|
Value: val,
|
2015-04-14 23:44:48 +00:00
|
|
|
}
|
2018-01-19 06:44:44 +00:00
|
|
|
err = c.barrier.Put(ctx, ent)
|
2016-04-23 02:55:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
sd, ok := c.ha.(physical.ServiceDiscovery)
|
|
|
|
if ok {
|
2016-04-28 17:56:41 +00:00
|
|
|
if err := sd.NotifyActiveStateChange(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("core: failed to notify active status", "error", err)
|
|
|
|
}
|
2016-04-28 17:56:41 +00:00
|
|
|
}
|
2016-04-23 02:55:17 +00:00
|
|
|
}
|
|
|
|
return nil
|
2015-04-14 23:44:48 +00:00
|
|
|
}
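// Illustrative sketch (not part of the original source): reading back the
// leader advertisement written above. Assumes jsonutil.DecodeJSON is the
// counterpart of the EncodeJSON call in advertiseLeader; the helper name is
// hypothetical.
func exampleReadLeaderEntry(ctx context.Context, c *Core, uuid string) (*activeAdvertisement, error) {
	entry, err := c.barrier.Get(ctx, coreLeaderPrefix+uuid)
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, nil
	}
	var adv activeAdvertisement
	if err := jsonutil.DecodeJSON(entry.Value, &adv); err != nil {
		return nil, err
	}
	return &adv, nil
}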
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
func (c *Core) cleanLeaderPrefix(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) {
|
|
|
|
keys, err := c.barrier.List(ctx, coreLeaderPrefix)
|
2015-10-08 17:47:21 +00:00
|
|
|
if err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.Error("core: failed to list entries in core/leader", "error", err)
|
2015-10-08 17:47:21 +00:00
|
|
|
return
|
|
|
|
}
|
2015-10-08 18:34:10 +00:00
|
|
|
for len(keys) > 0 {
|
2015-10-08 17:47:21 +00:00
|
|
|
select {
|
2015-10-08 18:34:10 +00:00
|
|
|
case <-time.After(leaderPrefixCleanDelay):
|
2015-10-08 17:47:21 +00:00
|
|
|
if keys[0] != uuid {
|
2018-01-19 06:44:44 +00:00
|
|
|
c.barrier.Delete(ctx, coreLeaderPrefix+keys[0])
|
2015-10-08 17:47:21 +00:00
|
|
|
}
|
|
|
|
keys = keys[1:]
|
2015-10-08 18:34:10 +00:00
|
|
|
case <-leaderLostCh:
|
2015-10-08 17:47:21 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 23:44:48 +00:00
|
|
|
// clearLeader is used to clear our leadership entry
|
|
|
|
func (c *Core) clearLeader(uuid string) error {
|
|
|
|
key := coreLeaderPrefix + uuid
|
2018-01-19 06:44:44 +00:00
|
|
|
err := c.barrier.Delete(c.activeContext, key)
|
2016-04-24 14:04:51 +00:00
|
|
|
|
|
|
|
// Advertise ourselves as a standby
|
|
|
|
sd, ok := c.ha.(physical.ServiceDiscovery)
|
|
|
|
if ok {
|
2016-04-28 17:56:41 +00:00
|
|
|
if err := sd.NotifyActiveStateChange(); err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsWarn() {
|
|
|
|
c.logger.Warn("core: failed to notify standby status", "error", err)
|
|
|
|
}
|
2016-04-28 17:56:41 +00:00
|
|
|
}
|
2016-04-24 14:04:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
2015-04-14 23:44:48 +00:00
|
|
|
}
|
|
|
|
|
2015-04-08 23:43:17 +00:00
|
|
|
// emitMetrics is used to periodically expose metrics while running
|
|
|
|
func (c *Core) emitMetrics(stopCh chan struct{}) {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-time.After(time.Second):
|
2015-10-12 20:33:54 +00:00
|
|
|
c.metricsMutex.Lock()
|
|
|
|
if c.expiration != nil {
|
|
|
|
c.expiration.emitMetrics()
|
|
|
|
}
|
|
|
|
c.metricsMutex.Unlock()
|
2015-04-08 23:43:17 +00:00
|
|
|
case <-stopCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-04-04 14:44:22 +00:00
|
|
|
|
2017-02-16 20:15:02 +00:00
|
|
|
func (c *Core) ReplicationState() consts.ReplicationState {
|
2018-01-16 18:51:55 +00:00
|
|
|
return consts.ReplicationState(atomic.LoadUint32(c.replicationState))
|
2017-02-16 20:15:02 +00:00
|
|
|
}
|
|
|
|
|
2018-01-20 00:24:04 +00:00
|
|
|
func (c *Core) ActiveNodeReplicationState() consts.ReplicationState {
|
|
|
|
return consts.ReplicationState(atomic.LoadUint32(c.activeNodeReplicationState))
|
|
|
|
}
|
|
|
|
|
2016-04-04 14:44:22 +00:00
|
|
|
func (c *Core) SealAccess() *SealAccess {
|
2017-10-23 20:03:36 +00:00
|
|
|
return NewSealAccess(c.seal)
|
2016-04-04 14:44:22 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
func (c *Core) Logger() log.Logger {
|
2016-08-15 13:42:42 +00:00
|
|
|
return c.logger
|
|
|
|
}
|
2016-08-15 20:01:15 +00:00
|
|
|
|
|
|
|
func (c *Core) BarrierKeyLength() (min, max int) {
|
|
|
|
min, max = c.barrier.KeyLength()
|
|
|
|
max += shamir.ShareOverhead
|
|
|
|
return
|
|
|
|
}
|
2017-02-02 19:49:20 +00:00
|
|
|
|
|
|
|
func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig {
|
|
|
|
return c.auditedHeaders
|
|
|
|
}
|
2017-03-01 17:39:42 +00:00
|
|
|
|
2017-03-01 17:42:10 +00:00
|
|
|
func lastRemoteWALImpl(c *Core) uint64 {
|
2017-03-01 17:39:42 +00:00
|
|
|
return 0
|
|
|
|
}
|
2017-10-23 20:03:36 +00:00
|
|
|
|
|
|
|
func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
|
|
|
|
return NewBarrierEncryptorAccess(c.barrier)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Core) PhysicalAccess() *physical.PhysicalAccess {
|
|
|
|
return physical.NewPhysicalAccess(c.physical)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Core) RouterAccess() *RouterAccess {
|
|
|
|
return NewRouterAccess(c)
|
|
|
|
}
|
2018-01-03 20:07:13 +00:00
|
|
|
|
|
|
|
// IsDRSecondary reports whether the current cluster state is a DR secondary.
|
|
|
|
func (c *Core) IsDRSecondary() bool {
|
|
|
|
return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
|
|
|
|
}
|