package vault

import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/url"
	"os"
	"strings"
	"sync"
	"time"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/helper/mlock"
	"github.com/hashicorp/vault/helper/pgpkeys"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/shamir"
)

const (
	// coreSealConfigPath is the path used to store our seal configuration.
	// This value is stored in plaintext, since we must be able to read
	// it even with the Vault sealed. This is required so that we know
	// how many secret parts must be used to reconstruct the master key.
	coreSealConfigPath = "core/seal-config"

	// coreLockPath is the path used to acquire a coordinating lock
	// for a highly-available deploy.
	coreLockPath = "core/lock"

	// coreLeaderPrefix is the prefix used for the UUID that contains
	// the currently elected leader.
	coreLeaderPrefix = "core/leader/"

	// coreUnsealKeysBackupPath is the path used to back up encrypted unseal
	// keys if specified during a rekey operation. This is outside of the
	// barrier.
	coreUnsealKeysBackupPath = "core/unseal-keys-backup"

	// lockRetryInterval is the interval we re-attempt to acquire the
	// HA lock if an error is encountered
	lockRetryInterval = 10 * time.Second

	// keyRotateCheckInterval is how often a standby checks for a key
	// rotation taking place.
	keyRotateCheckInterval = 30 * time.Second

	// keyRotateGracePeriod is how long we allow an upgrade path
	// for standby instances before we delete the upgrade keys
	keyRotateGracePeriod = 2 * time.Minute

	// leaderPrefixCleanDelay is how long to wait between deletions
	// of orphaned leader keys, to prevent slamming the backend.
	leaderPrefixCleanDelay = 200 * time.Millisecond
)

var (
	// ErrSealed is returned if an operation is performed on
	// a sealed barrier. No operation is expected to succeed before unsealing
	ErrSealed = errors.New("Vault is sealed")

	// ErrStandby is returned if an operation is performed on
	// a standby Vault. No operation is expected to succeed until active.
	ErrStandby = errors.New("Vault is in standby mode")

	// ErrAlreadyInit is returned if the core is already
	// initialized. This prevents a re-initialization.
	ErrAlreadyInit = errors.New("Vault is already initialized")

	// ErrNotInit is returned if a non-initialized barrier
	// is attempted to be unsealed.
	ErrNotInit = errors.New("Vault is not initialized")

	// ErrInternalError is returned when we don't want to leak
	// any information about an internal error
	ErrInternalError = errors.New("internal error")

	// ErrHANotEnabled is returned if the operation only makes sense
	// in an HA setting
	ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")
)

// SealConfig is used to describe the seal configuration
type SealConfig struct {
	// SecretShares is the number of shares the secret is
	// split into. This is the N value of Shamir.
	SecretShares int `json:"secret_shares"`

	// PGPKeys is the array of public PGP keys used,
	// if requested, to encrypt the output unseal tokens. If
	// provided, it sets the value of SecretShares. Ordering
	// is important.
	PGPKeys []string `json:"pgp_keys"`

	// SecretThreshold is the number of parts required
	// to open the vault. This is the T value of Shamir
	SecretThreshold int `json:"secret_threshold"`

	// Nonce is a nonce generated by Vault used to ensure that when unseal keys
	// are submitted for a rekey operation, the rekey operation itself is the
	// one intended. This prevents hijacking of the rekey operation, since it
	// is unauthenticated.
	Nonce string `json:"nonce"`

	// Backup indicates whether or not a backup of PGP-encrypted unseal keys
	// should be stored at coreUnsealKeysBackupPath after successful rekeying.
	Backup bool `json:"backup"`
}

// Validate is used to sanity check the seal configuration
func (s *SealConfig) Validate() error {
	if s.SecretShares < 1 {
		return fmt.Errorf("secret shares must be at least one")
	}
	if s.SecretThreshold < 1 {
		return fmt.Errorf("secret threshold must be at least one")
	}
	if s.SecretShares > 1 && s.SecretThreshold == 1 {
		return fmt.Errorf("secret threshold must be greater than one for multiple shares")
	}
	if s.SecretShares > 255 {
		return fmt.Errorf("secret shares must be less than 256")
	}
	if s.SecretThreshold > 255 {
		return fmt.Errorf("secret threshold must be less than 256")
	}
	if s.SecretThreshold > s.SecretShares {
		return fmt.Errorf("secret threshold cannot be larger than secret shares")
	}
	if len(s.PGPKeys) > 0 && len(s.PGPKeys) != s.SecretShares {
		return fmt.Errorf("count mismatch between number of provided PGP keys and number of shares")
	}
	if len(s.PGPKeys) > 0 {
		for _, keystring := range s.PGPKeys {
			data, err := base64.StdEncoding.DecodeString(keystring)
			if err != nil {
				return fmt.Errorf("Error decoding given PGP key: %s", err)
			}
			_, err = openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
			if err != nil {
				return fmt.Errorf("Error parsing given PGP key: %s", err)
			}
		}
	}
	return nil
}

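// Illustrative sketch (not part of the original source): a five-share,
// three-threshold split is a typical configuration that passes Validate,
// since both values are within 1..255 and the threshold does not exceed
// the share count.
//
//	cfg := &SealConfig{
//		SecretShares:    5,
//		SecretThreshold: 3,
//	}
//	if err := cfg.Validate(); err != nil {
//		// unreachable for 5/3: all Validate checks pass
//	}
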
// InitResult is used to provide the key parts back after
// they are generated as part of the initialization.
type InitResult struct {
	SecretShares [][]byte
	RootToken    string
}

// RekeyResult is used to provide the key parts back after
// they are generated as part of the rekey.
type RekeyResult struct {
	SecretShares    [][]byte
	PGPFingerprints []string
	Backup          bool
}

// RekeyBackup stores the backup copy of PGP-encrypted keys
type RekeyBackup struct {
	Nonce string
	Keys  map[string]string
}

// ErrInvalidKey is returned if there is an error with a
// provided unseal key.
type ErrInvalidKey struct {
	Reason string
}

func (e *ErrInvalidKey) Error() string {
	return fmt.Sprintf("invalid key: %v", e.Reason)
}

// Core is used as the central manager of Vault activity. It is the primary point of
// interface for API handlers and is responsible for managing the logical and physical
// backends, router, security barrier, and audit trails.
type Core struct {
	// HABackend may be available depending on the physical backend
	ha physical.HABackend

	// AdvertiseAddr is the address we advertise as leader if held
	advertiseAddr string

	// physical backend is the un-trusted backend with durable data
	physical physical.Backend

	// barrier is the security barrier wrapping the physical backend
	barrier SecurityBarrier

	// router is responsible for managing the mount points for logical backends.
	router *Router

	// logicalBackends is the mapping of backends to use for this core
	logicalBackends map[string]logical.Factory

	// credentialBackends is the mapping of backends to use for this core
	credentialBackends map[string]logical.Factory

	// auditBackends is the mapping of backends to use for this core
	auditBackends map[string]audit.Factory

	// stateLock protects mutable state
	stateLock sync.RWMutex
	sealed    bool

	standby       bool
	standbyDoneCh chan struct{}
	standbyStopCh chan struct{}

	// unlockParts has the keys provided to Unseal until
	// the threshold number of parts is available.
	unlockParts [][]byte

	// rekeyProgress holds the shares we have until we reach enough
	// to verify the master key.
	rekeyConfig   *SealConfig
	rekeyProgress [][]byte
	rekeyLock     sync.Mutex

	// mounts is loaded after unseal since it is a protected
	// configuration
	mounts *MountTable

	// mountsLock is used to ensure that the mounts table does not
	// change underneath a calling function
	mountsLock sync.RWMutex

	// auth is loaded after unseal since it is a protected
	// configuration
	auth *MountTable

	// authLock is used to ensure that the auth table does not
	// change underneath a calling function
	authLock sync.RWMutex

	// audit is loaded after unseal since it is a protected
	// configuration
	audit *MountTable

	// auditLock is used to ensure that the audit table does not
	// change underneath a calling function
	auditLock sync.RWMutex

	// auditBroker is used to ingest the audit events and fan
	// out into the configured audit backends
	auditBroker *AuditBroker

	// systemBarrierView is the barrier view for the system backend
	systemBarrierView *BarrierView

	// expiration manager is used for managing LeaseIDs,
	// renewal, expiration and revocation
	expiration *ExpirationManager

	// rollback manager is used to run rollbacks periodically
	rollback *RollbackManager

	// policy store is used to manage named ACL policies
	policyStore *PolicyStore

	// token store is used to manage authentication tokens
	tokenStore *TokenStore

	// metricsCh is used to stop the metrics streaming
	metricsCh chan struct{}

	// metricsMutex is used to prevent a race condition between
	// metrics emission and sealing leading to a nil pointer
	metricsMutex sync.Mutex

	defaultLeaseTTL time.Duration
	maxLeaseTTL     time.Duration

	logger *log.Logger
}

// CoreConfig is used to parameterize a core
type CoreConfig struct {
	LogicalBackends    map[string]logical.Factory
	CredentialBackends map[string]logical.Factory
	AuditBackends      map[string]audit.Factory
	Physical           physical.Backend
	HAPhysical         physical.HABackend // May be nil, which disables HA operations
	Logger             *log.Logger
	DisableCache       bool          // Disables the LRU cache on the physical backend
	DisableMlock       bool          // Disables mlock syscall
	CacheSize          int           // Custom cache size, or zero for default
	AdvertiseAddr      string        // Set as the leader address for HA
	DefaultLeaseTTL    time.Duration
	MaxLeaseTTL        time.Duration
}

// NewCore is used to construct a new core
func NewCore(conf *CoreConfig) (*Core, error) {
	if conf.HAPhysical != nil && conf.AdvertiseAddr == "" {
		return nil, fmt.Errorf("missing advertisement address")
	}

	if conf.DefaultLeaseTTL == 0 {
		conf.DefaultLeaseTTL = defaultLeaseTTL
	}
	if conf.MaxLeaseTTL == 0 {
		conf.MaxLeaseTTL = maxLeaseTTL
	}
	if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
		return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
	}

	// Validate the advertise addr if it's given to us
	if conf.AdvertiseAddr != "" {
		u, err := url.Parse(conf.AdvertiseAddr)
		if err != nil {
			return nil, fmt.Errorf("advertisement address is not valid url: %s", err)
		}

		if u.Scheme == "" {
			return nil, fmt.Errorf("advertisement address must include scheme (ex. 'http')")
		}
	}

	// Wrap the backend in a cache unless disabled
	if !conf.DisableCache {
		_, isCache := conf.Physical.(*physical.Cache)
		_, isInmem := conf.Physical.(*physical.InmemBackend)
		if !isCache && !isInmem {
			cache := physical.NewCache(conf.Physical, conf.CacheSize)
			conf.Physical = cache
		}
	}

	if !conf.DisableMlock {
		// Ensure our memory usage is locked into physical RAM
		if err := mlock.LockMemory(); err != nil {
			return nil, fmt.Errorf(
				"Failed to lock memory: %v\n\n"+
					"This usually means that the mlock syscall is not available.\n"+
					"Vault uses mlock to prevent memory from being swapped to\n"+
					"disk. This requires root privileges as well as a machine\n"+
					"that supports mlock. Please enable mlock on your system or\n"+
					"disable Vault from using it. To disable Vault from using it,\n"+
					"set the `disable_mlock` configuration option in your configuration\n"+
					"file.",
				err)
		}
	}

	// Construct a new AES-GCM barrier
	barrier, err := NewAESGCMBarrier(conf.Physical)
	if err != nil {
		return nil, fmt.Errorf("barrier setup failed: %v", err)
	}

	// Make a default logger if not provided
	if conf.Logger == nil {
		conf.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}

	// Setup the core
	c := &Core{
		ha:              conf.HAPhysical,
		advertiseAddr:   conf.AdvertiseAddr,
		physical:        conf.Physical,
		barrier:         barrier,
		router:          NewRouter(),
		sealed:          true,
		standby:         true,
		logger:          conf.Logger,
		defaultLeaseTTL: conf.DefaultLeaseTTL,
		maxLeaseTTL:     conf.MaxLeaseTTL,
	}

	// Setup the backends
	logicalBackends := make(map[string]logical.Factory)
	for k, f := range conf.LogicalBackends {
		logicalBackends[k] = f
	}
	_, ok := logicalBackends["generic"]
	if !ok {
		logicalBackends["generic"] = PassthroughBackendFactory
	}
	logicalBackends["cubbyhole"] = CubbyholeBackendFactory
	logicalBackends["system"] = func(config *logical.BackendConfig) (logical.Backend, error) {
		return NewSystemBackend(c, config), nil
	}
	c.logicalBackends = logicalBackends

	credentialBackends := make(map[string]logical.Factory)
	for k, f := range conf.CredentialBackends {
		credentialBackends[k] = f
	}
	credentialBackends["token"] = func(config *logical.BackendConfig) (logical.Backend, error) {
		return NewTokenStore(c, config)
	}
	c.credentialBackends = credentialBackends

	auditBackends := make(map[string]audit.Factory)
	for k, f := range conf.AuditBackends {
		auditBackends[k] = f
	}
	c.auditBackends = auditBackends
	return c, nil
}

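// Illustrative sketch (not part of the original source): wiring a Core from
// a CoreConfig. The `backend` variable is an assumption standing in for any
// physical.Backend configured elsewhere; a newly constructed Core always
// starts sealed.
//
//	var backend physical.Backend // e.g. an in-memory or file backend
//	core, err := NewCore(&CoreConfig{
//		Physical:     backend,
//		DisableMlock: true, // avoid requiring mlock privileges in this sketch
//	})
//	if err != nil {
//		// handle construction error
//	}
//	sealed, _ := core.Sealed() // true until Initialize/Unseal are performed
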
// Shutdown is invoked when the Vault instance is about to be terminated. It
// should not be accessible as part of an API call as it will cause an availability
// problem. It is only used to gracefully quit in the case of HA so that failover
// happens as quickly as possible.
func (c *Core) Shutdown() error {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	if c.sealed {
		return nil
	}

	// Seal the Vault, causes a leader stepdown
	return c.sealInternal()
}

// HandleRequest is used to handle a new incoming request
func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return nil, ErrSealed
	}
	if c.standby {
		return nil, ErrStandby
	}

	var auth *logical.Auth
	if c.router.LoginPath(req.Path) {
		resp, auth, err = c.handleLoginRequest(req)
	} else {
		resp, auth, err = c.handleRequest(req)
	}

	// Ensure we don't leak internal data
	if resp != nil {
		if resp.Secret != nil {
			resp.Secret.InternalData = nil
		}
		if resp.Auth != nil {
			resp.Auth.InternalData = nil
		}
	}

	// Create an audit trail of the response
	if err := c.auditBroker.LogResponse(auth, req, resp, err); err != nil {
		c.logger.Printf("[ERR] core: failed to audit response (request path: %s): %v",
			req.Path, err)
		return nil, ErrInternalError
	}

	return
}

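// Illustrative sketch (not part of the original source): how an API-layer
// caller might drive HandleRequest. The request fields shown are assumptions
// for the example; ErrSealed and ErrStandby are surfaced to the caller
// unchanged so it can redirect to the active, unsealed node.
//
//	resp, err := core.HandleRequest(&logical.Request{
//		Operation:   logical.ReadOperation,
//		Path:        "secret/foo",
//		ClientToken: token,
//	})
//	switch err {
//	case ErrSealed, ErrStandby:
//		// retry against the active, unsealed node
//	}
//	_ = resp
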
func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
	defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now())

	// Validate the token
	auth, te, err := c.checkToken(req.Operation, req.Path, req.ClientToken)
	if te != nil {
		defer func() {
			// Attempt to use the token (decrement num_uses)
			// If a secret was generated and num_uses is currently 1, it will be
			// immediately revoked; in that case, don't return the leased
			// credentials as they are now invalid.
			if retResp != nil &&
				te != nil && te.NumUses == 1 &&
				retResp.Secret != nil &&
				// Some backends return a TTL even without a Lease ID
				retResp.Secret.LeaseID != "" {
				retResp = logical.ErrorResponse("Secret cannot be returned; token had one use left, so leased credentials were immediately revoked.")
			}
			if err := c.tokenStore.UseToken(te); err != nil {
				c.logger.Printf("[ERR] core: failed to use token: %v", err)
				retResp = nil
				retAuth = nil
				retErr = ErrInternalError
			}
		}()
	}
	if err != nil {
		// If it is an internal error we return that, otherwise we
		// return invalid request so that the status codes can be correct
		var errType error
		switch err {
		case ErrInternalError, logical.ErrPermissionDenied:
			errType = err
		default:
			errType = logical.ErrInvalidRequest
		}

		if err := c.auditBroker.LogRequest(auth, req, err); err != nil {
			c.logger.Printf("[ERR] core: failed to audit request with path (%s): %v",
				req.Path, err)
		}

		return logical.ErrorResponse(err.Error()), nil, errType
	}

	// Attach the display name
	req.DisplayName = auth.DisplayName

	// Create an audit trail of the request
	if err := c.auditBroker.LogRequest(auth, req, nil); err != nil {
		c.logger.Printf("[ERR] core: failed to audit request with path (%s): %v",
			req.Path, err)
		return nil, auth, ErrInternalError
	}

	// Route the request
	resp, err := c.router.Route(req)

	// If there is a secret, we must register it with the expiration manager.
	// We exclude renewal of a lease, since it does not need to be re-registered
	if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew/") {
		// Get the SystemView for the mount
		sysView := c.router.MatchingSystemView(req.Path)
		if sysView == nil {
			c.logger.Println("[ERR] core: unable to retrieve system view from router")
			return nil, auth, ErrInternalError
		}

		// Apply the default lease if none given
		if resp.Secret.TTL == 0 {
			resp.Secret.TTL = sysView.DefaultLeaseTTL()
		}

		// Limit the lease duration
		maxTTL := sysView.MaxLeaseTTL()
		if resp.Secret.TTL > maxTTL {
			resp.Secret.TTL = maxTTL
		}

		// Generic mounts should return the TTL but not register
		// for a lease as this provides a massive slowdown
		registerLease := true
		matchingBackend := c.router.MatchingBackend(req.Path)
		if matchingBackend == nil {
			c.logger.Println("[ERR] core: unable to retrieve generic backend from router")
			return nil, auth, ErrInternalError
		}
		if ptbe, ok := matchingBackend.(*PassthroughBackend); ok {
			if !ptbe.GeneratesLeases() {
				registerLease = false
				resp.Secret.Renewable = false
			}
		}

		if registerLease {
			leaseID, err := c.expiration.Register(req, resp)
			if err != nil {
				c.logger.Printf(
					"[ERR] core: failed to register lease "+
						"(request path: %s): %v", req.Path, err)
				return nil, auth, ErrInternalError
			}
			resp.Secret.LeaseID = leaseID
		}
	}

	// Only the token store is allowed to return an auth block, for any
	// other request this is an internal error. We exclude renewal of a token,
	// since it does not need to be re-registered
	if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew/") {
		if !strings.HasPrefix(req.Path, "auth/token/") {
			c.logger.Printf(
				"[ERR] core: unexpected Auth response for non-token backend "+
					"(request path: %s)", req.Path)
			return nil, auth, ErrInternalError
		}

		sysView := c.router.MatchingSystemView(req.Path)
		if sysView == nil {
			c.logger.Println("[ERR] core: unable to retrieve system view from router")
			return nil, auth, ErrInternalError
		}

		// Apply the default lease if none given
		if resp.Auth.TTL == 0 && !strListContains(resp.Auth.Policies, "root") {
			resp.Auth.TTL = sysView.DefaultLeaseTTL()
		}

		// Limit the lease duration
		maxTTL := sysView.MaxLeaseTTL()
		if resp.Auth.TTL > maxTTL {
			resp.Auth.TTL = maxTTL
		}

		// Register with the expiration manager
		if err := c.expiration.RegisterAuth(req.Path, resp.Auth); err != nil {
			c.logger.Printf("[ERR] core: failed to register token lease "+
				"(request path: %s): %v", req.Path, err)
			return nil, auth, ErrInternalError
		}
	}

	// Return the response and error
	return resp, auth, err
}

// handleLoginRequest is used to handle a login request, which is an
// unauthenticated request to the backend.
func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *logical.Auth, error) {
	defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now())

	// Create an audit trail of the request, auth is not available on login requests
	if err := c.auditBroker.LogRequest(nil, req, nil); err != nil {
		c.logger.Printf("[ERR] core: failed to audit request with path %s: %v",
			req.Path, err)
		return nil, nil, ErrInternalError
	}

	// Route the request
	resp, err := c.router.Route(req)

	// A login request should never return a secret!
	if resp != nil && resp.Secret != nil {
		c.logger.Printf("[ERR] core: unexpected Secret response for login path "+
			"(request path: %s)", req.Path)
		return nil, nil, ErrInternalError
	}

	// If the response generated an authentication, then generate the token
	var auth *logical.Auth
	if resp != nil && resp.Auth != nil {
		auth = resp.Auth

		// Determine the source of the login
		source := c.router.MatchingMount(req.Path)
		source = strings.TrimPrefix(source, credentialRoutePrefix)
		source = strings.Replace(source, "/", "-", -1)

		// Prepend the source to the display name
		auth.DisplayName = strings.TrimSuffix(source+auth.DisplayName, "-")

		sysView := c.router.MatchingSystemView(req.Path)
		if sysView == nil {
			c.logger.Printf("[ERR] core: unable to look up sys view for login path "+
				"(request path: %s)", req.Path)
			return nil, nil, ErrInternalError
		}

		// Set the default lease if none provided, root tokens are exempt
		if auth.TTL == 0 && !strListContains(auth.Policies, "root") {
			auth.TTL = sysView.DefaultLeaseTTL()
		}

		// Limit the lease duration
		if auth.TTL > sysView.MaxLeaseTTL() {
			auth.TTL = sysView.MaxLeaseTTL()
		}

		// Generate a token
		te := TokenEntry{
			Path:         req.Path,
			Policies:     auth.Policies,
			Meta:         auth.Metadata,
			DisplayName:  auth.DisplayName,
			CreationTime: time.Now().Unix(),
			TTL:          auth.TTL,
		}

		if !strListSubset(te.Policies, []string{"root"}) {
			te.Policies = append(te.Policies, "default")
		}

		if err := c.tokenStore.create(&te); err != nil {
			c.logger.Printf("[ERR] core: failed to create token: %v", err)
			return nil, auth, ErrInternalError
		}

		// Populate the client token
		auth.ClientToken = te.ID

		// Register with the expiration manager
		if err := c.expiration.RegisterAuth(req.Path, auth); err != nil {
			c.logger.Printf("[ERR] core: failed to register token lease "+
				"(request path: %s): %v", req.Path, err)
			return nil, auth, ErrInternalError
		}

		// Attach the display name, might be used by audit backends
		req.DisplayName = auth.DisplayName
	}

	return resp, auth, err
}

func (c *Core) checkToken(
	op logical.Operation, path string, token string) (*logical.Auth, *TokenEntry, error) {
	defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())

	// Ensure there is a client token
	if token == "" {
		return nil, nil, fmt.Errorf("missing client token")
	}

	if c.tokenStore == nil {
		c.logger.Printf("[ERR] core: token store is unavailable")
		return nil, nil, ErrInternalError
	}

	// Resolve the token policy
	te, err := c.tokenStore.Lookup(token)
	if err != nil {
		c.logger.Printf("[ERR] core: failed to lookup token: %v", err)
		return nil, nil, ErrInternalError
	}

	// Ensure the token is valid
	if te == nil {
		return nil, nil, logical.ErrPermissionDenied
	}

	// Construct the corresponding ACL object
	acl, err := c.policyStore.ACL(te.Policies...)
	if err != nil {
		c.logger.Printf("[ERR] core: failed to construct ACL: %v", err)
		return nil, nil, ErrInternalError
	}

	// Check if this is a root protected path
	if c.router.RootPath(path) && !acl.RootPrivilege(path) {
		return nil, nil, logical.ErrPermissionDenied
	}

	// Check the standard non-root ACLs
	if !acl.AllowOperation(op, path) {
		return nil, nil, logical.ErrPermissionDenied
	}

	// Create the auth response
	auth := &logical.Auth{
		ClientToken: token,
		Policies:    te.Policies,
		Metadata:    te.Meta,
		DisplayName: te.DisplayName,
	}
	return auth, te, nil
}

// Initialized checks if the Vault is already initialized
func (c *Core) Initialized() (bool, error) {
	// Check the barrier first
	init, err := c.barrier.Initialized()
	if err != nil {
		c.logger.Printf("[ERR] core: barrier init check failed: %v", err)
		return false, err
	}
	if !init {
		c.logger.Printf("[INFO] core: security barrier not initialized")
		return false, nil
	}

	// Verify the seal configuration
	sealConf, err := c.SealConfig()
	if err != nil {
		return false, err
	}
	if sealConf == nil {
		return false, nil
	}
	return true, nil
}

// Initialize is used to initialize the Vault with the given
// configurations.
func (c *Core) Initialize(config *SealConfig) (*InitResult, error) {
	// Check if the seal configuration is valid
	if err := config.Validate(); err != nil {
		c.logger.Printf("[ERR] core: invalid seal configuration: %v", err)
		return nil, fmt.Errorf("invalid seal configuration: %v", err)
	}

	// Avoid an initialization race
	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	// Check if we are initialized
	init, err := c.Initialized()
	if err != nil {
		return nil, err
	}
	if init {
		return nil, ErrAlreadyInit
	}

	// Encode the seal configuration
	buf, err := json.Marshal(config)
	if err != nil {
		return nil, fmt.Errorf("failed to encode seal configuration: %v", err)
	}

	// Store the seal configuration
	pe := &physical.Entry{
		Key:   coreSealConfigPath,
		Value: buf,
	}
	if err := c.physical.Put(pe); err != nil {
		c.logger.Printf("[ERR] core: failed to write seal configuration: %v", err)
		return nil, fmt.Errorf("failed to write seal configuration: %v", err)
	}

	// Generate a master key
	masterKey, err := c.barrier.GenerateKey()
	if err != nil {
		c.logger.Printf("[ERR] core: failed to generate master key: %v", err)
		return nil, fmt.Errorf("master key generation failed: %v", err)
	}

	// Return the master key if only a single key part is used
	results := new(InitResult)
	if config.SecretShares == 1 {
		results.SecretShares = append(results.SecretShares, masterKey)
	} else {
		// Split the master key using the Shamir algorithm
		shares, err := shamir.Split(masterKey, config.SecretShares, config.SecretThreshold)
		if err != nil {
			c.logger.Printf("[ERR] core: failed to generate shares: %v", err)
			return nil, fmt.Errorf("failed to generate shares: %v", err)
		}
		results.SecretShares = shares
	}

	if len(config.PGPKeys) > 0 {
		_, encryptedShares, err := pgpkeys.EncryptShares(results.SecretShares, config.PGPKeys)
		if err != nil {
			return nil, err
		}
		results.SecretShares = encryptedShares
	}

	// Initialize the barrier
	if err := c.barrier.Initialize(masterKey); err != nil {
		c.logger.Printf("[ERR] core: failed to initialize barrier: %v", err)
		return nil, fmt.Errorf("failed to initialize barrier: %v", err)
	}
	c.logger.Printf("[INFO] core: security barrier initialized (shares: %d, threshold %d)",
		config.SecretShares, config.SecretThreshold)

	// Unseal the barrier
	if err := c.barrier.Unseal(masterKey); err != nil {
		c.logger.Printf("[ERR] core: failed to unseal barrier: %v", err)
		return nil, fmt.Errorf("failed to unseal barrier: %v", err)
	}

	// Ensure the barrier is re-sealed
	defer func() {
		if err := c.barrier.Seal(); err != nil {
			c.logger.Printf("[ERR] core: failed to seal barrier: %v", err)
		}
	}()

	// Perform initial setup
	if err := c.postUnseal(); err != nil {
		c.logger.Printf("[ERR] core: post-unseal setup failed: %v", err)
		return nil, err
	}

	// Generate a new root token
	rootToken, err := c.tokenStore.rootToken()
	if err != nil {
		c.logger.Printf("[ERR] core: root token generation failed: %v", err)
		return nil, err
	}
	results.RootToken = rootToken.ID
	c.logger.Printf("[INFO] core: root token generated")

	// Prepare to re-seal
	if err := c.preSeal(); err != nil {
		c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", err)
		return nil, err
	}
	return results, nil
}

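// Illustrative sketch (not part of the original source): initializing a new
// Vault and capturing the returned shares and root token. With PGPKeys unset,
// InitResult.SecretShares holds the raw Shamir shares.
//
//	res, err := core.Initialize(&SealConfig{
//		SecretShares:    5,
//		SecretThreshold: 3,
//	})
//	if err != nil {
//		// already initialized, invalid config, or storage failure
//	}
//	// res.SecretShares has 5 entries; any 3 of them can unseal the Vault.
//	// res.RootToken is the initial root token ID.
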
// Sealed checks if the Vault is currently sealed
func (c *Core) Sealed() (bool, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return c.sealed, nil
}

// Standby checks if the Vault is in standby mode
func (c *Core) Standby() (bool, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return c.standby, nil
}

// Leader is used to get the current active leader
func (c *Core) Leader() (bool, string, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()

	// Check if HA enabled
	if c.ha == nil {
		return false, "", ErrHANotEnabled
	}

	// Check if sealed
	if c.sealed {
		return false, "", ErrSealed
	}

	// Check if we are the leader
	if !c.standby {
		return true, c.advertiseAddr, nil
	}

	// Initialize a lock
	lock, err := c.ha.LockWith(coreLockPath, "read")
	if err != nil {
		return false, "", err
	}

	// Read the value
	held, value, err := lock.Value()
	if err != nil {
		return false, "", err
	}
	if !held {
		return false, "", nil
	}

	// Value is the UUID of the leader, fetch the key
	key := coreLeaderPrefix + value
	entry, err := c.barrier.Get(key)
	if err != nil {
		return false, "", err
	}
	if entry == nil {
		return false, "", nil
	}

	// Leader address is in the entry
	return false, string(entry.Value), nil
}

// SealConfig is used to return information
// about the seal configuration of the Vault
// and its current status.
func (c *Core) SealConfig() (*SealConfig, error) {
	// Fetch the core configuration
	pe, err := c.physical.Get(coreSealConfigPath)
	if err != nil {
		c.logger.Printf("[ERR] core: failed to read seal configuration: %v", err)
		return nil, fmt.Errorf("failed to check seal configuration: %v", err)
	}

	// If the seal configuration is missing, we are not initialized
	if pe == nil {
		c.logger.Printf("[INFO] core: seal configuration missing, not initialized")
		return nil, nil
	}

	// Decode the barrier entry
	var conf SealConfig
	if err := json.Unmarshal(pe.Value, &conf); err != nil {
		c.logger.Printf("[ERR] core: failed to decode seal configuration: %v", err)
		return nil, fmt.Errorf("failed to decode seal configuration: %v", err)
	}

	// Check for a valid seal configuration
	if err := conf.Validate(); err != nil {
		c.logger.Printf("[ERR] core: invalid seal configuration: %v", err)
		return nil, fmt.Errorf("seal validation failed: %v", err)
	}

	return &conf, nil
}

// SecretProgress returns the number of keys provided so far
func (c *Core) SecretProgress() int {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	return len(c.unlockParts)
}

// ResetUnsealProcess removes the current unlock parts from memory, to reset
// the unsealing process
func (c *Core) ResetUnsealProcess() {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	if !c.sealed {
		return
	}
	c.unlockParts = nil
}

// Unseal is used to provide one of the key parts to unseal the Vault.
//
// The key given as a parameter will automatically be zeroed after
// this method is done with it. If you want to keep the key around, a copy
// should be made.
func (c *Core) Unseal(key []byte) (bool, error) {
	defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())

	// Verify the key length
	min, max := c.barrier.KeyLength()
	max += shamir.ShareOverhead
	if len(key) < min {
		return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
	}
	if len(key) > max {
		return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
	}

	// Get the seal configuration
	config, err := c.SealConfig()
	if err != nil {
		return false, err
	}

	// Ensure the barrier is initialized
	if config == nil {
		return false, ErrNotInit
	}

	c.stateLock.Lock()
	defer c.stateLock.Unlock()

	// Check if already unsealed
	if !c.sealed {
		return true, nil
	}

	// Check if we already have this piece
	for _, existing := range c.unlockParts {
		if bytes.Equal(existing, key) {
			return false, nil
		}
	}

	// Store this key
	c.unlockParts = append(c.unlockParts, key)

	// Check if we don't have enough keys to unlock
	if len(c.unlockParts) < config.SecretThreshold {
		c.logger.Printf("[DEBUG] core: cannot unseal, have %d of %d keys",
			len(c.unlockParts), config.SecretThreshold)
		return false, nil
	}

	// Recover the master key
	var masterKey []byte
	if config.SecretThreshold == 1 {
		masterKey = c.unlockParts[0]
		c.unlockParts = nil
	} else {
		masterKey, err = shamir.Combine(c.unlockParts)
		c.unlockParts = nil
		if err != nil {
			return false, fmt.Errorf("failed to compute master key: %v", err)
		}
	}
	defer memzero(masterKey)

	// Attempt to unlock
	if err := c.barrier.Unseal(masterKey); err != nil {
		return false, err
	}
	c.logger.Printf("[INFO] core: vault is unsealed")

	// Do post-unseal setup if HA is not enabled
	if c.ha == nil {
		if err := c.postUnseal(); err != nil {
			c.logger.Printf("[ERR] core: post-unseal setup failed: %v", err)
			c.barrier.Seal()
			c.logger.Printf("[WARN] core: vault is sealed")
			return false, err
		}
		c.standby = false
	} else {
		// Go to standby mode, wait until we are active to unseal
		c.standbyDoneCh = make(chan struct{})
		c.standbyStopCh = make(chan struct{})
		go c.runStandby(c.standbyDoneCh, c.standbyStopCh)
	}

	// Success!
	c.sealed = false
	return true, nil
}

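// Illustrative sketch (not part of the original source): feeding unseal key
// shares one at a time until the threshold is reached. The `shares` slice is
// an assumption standing in for keys handed out by Initialize.
//
//	for _, share := range shares {
//		unsealed, err := core.Unseal(share)
//		if err != nil {
//			// invalid key length or key material that does not combine
//			break
//		}
//		if unsealed {
//			break // threshold reached; the barrier is now unsealed
//		}
//	}
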
// Seal is used to re-seal the Vault. This requires the Vault to
// be unsealed again to perform any further operations.
func (c *Core) Seal(token string) (retErr error) {
	defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	if c.sealed {
		return nil
	}

	// Validate the token is a root token
	_, te, err := c.checkToken(logical.WriteOperation, "sys/seal", token)
	if te != nil {
		// Attempt to use the token (decrement num_uses)
		if err := c.tokenStore.UseToken(te); err != nil {
			c.logger.Printf("[ERR] core: failed to use token: %v", err)
			retErr = ErrInternalError
		}
	}
	if err != nil {
		return err
	}

	// Seal the Vault
	err = c.sealInternal()
	if err == nil && retErr == ErrInternalError {
		c.logger.Printf("[ERR] core: core is successfully sealed but another error occurred during the operation")
	} else {
		retErr = err
	}

	return
}

// sealInternal is an internal method used to seal the vault.
// It does not do any authorization checking. The stateLock must
// be held prior to calling.
func (c *Core) sealInternal() error {
	// Mark that we are sealed to prevent further transactions
	c.sealed = true

	// Do pre-seal teardown if HA is not enabled
	if c.ha == nil {
		if err := c.preSeal(); err != nil {
			c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", err)
			return fmt.Errorf("internal error")
		}
	} else {
		// Signal the standby goroutine to shutdown, wait for completion
		close(c.standbyStopCh)

		// Release the lock while we wait to avoid deadlocking
		c.stateLock.Unlock()
		<-c.standbyDoneCh
		c.stateLock.Lock()
	}

	if err := c.barrier.Seal(); err != nil {
		return err
	}
	c.logger.Printf("[INFO] core: vault is sealed")
	return nil
}

// RekeyProgress is used to return the rekey progress (num shares)
func (c *Core) RekeyProgress() (int, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return 0, ErrSealed
	}
	if c.standby {
		return 0, ErrStandby
	}

	c.rekeyLock.Lock()
	defer c.rekeyLock.Unlock()
	return len(c.rekeyProgress), nil
}

// RekeyConfig is used to read the rekey configuration
func (c *Core) RekeyConfig() (*SealConfig, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return nil, ErrSealed
	}
	if c.standby {
		return nil, ErrStandby
	}

	c.rekeyLock.Lock()
	defer c.rekeyLock.Unlock()

	// Copy the seal config if any
	var conf *SealConfig
	if c.rekeyConfig != nil {
		conf = new(SealConfig)
		*conf = *c.rekeyConfig
	}
	return conf, nil
}

// RekeyInit is used to initialize the rekey settings
func (c *Core) RekeyInit(config *SealConfig) error {
	// Check if the seal configuration is valid
	if err := config.Validate(); err != nil {
		c.logger.Printf("[ERR] core: invalid rekey seal configuration: %v", err)
		return fmt.Errorf("invalid rekey seal configuration: %v", err)
	}

	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return ErrSealed
	}
	if c.standby {
		return ErrStandby
	}

	// Prevent multiple concurrent re-keys
	if c.rekeyConfig != nil {
		return fmt.Errorf("rekey already in progress")
	}

	// Copy the configuration
	c.rekeyConfig = new(SealConfig)
	*c.rekeyConfig = *config

	// Initialize the nonce
	c.rekeyConfig.Nonce = uuid.GenerateUUID()
	c.logger.Printf("[INFO] core: rekey initialized (nonce: %s, shares: %d, threshold: %d)",
		c.rekeyConfig.Nonce, c.rekeyConfig.SecretShares, c.rekeyConfig.SecretThreshold)
	return nil
}

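// Illustrative sketch (not part of the original source): the rekey flow as
// driven by a caller. The nonce returned via RekeyConfig must accompany each
// RekeyUpdate call; `oldShares` is an assumption standing in for the current
// unseal keys.
//
//	if err := core.RekeyInit(&SealConfig{SecretShares: 5, SecretThreshold: 3}); err != nil {
//		// a rekey may already be in progress
//	}
//	conf, _ := core.RekeyConfig()
//	var result *RekeyResult
//	for _, share := range oldShares {
//		res, err := core.RekeyUpdate(share, conf.Nonce)
//		if err != nil {
//			break // wrong nonce, bad key material, or verification failure
//		}
//		if res != nil {
//			result = res // threshold reached; result holds the new shares
//			break
//		}
//	}
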
|
|
|
|
|
|
|
|
// RekeyUpdate is used to provide a new key part
|
2015-12-16 21:56:15 +00:00
|
|
|
func (c *Core) RekeyUpdate(key []byte, nonce string) (*RekeyResult, error) {
|
2015-05-28 18:40:01 +00:00
|
|
|
// Verify the key length
|
|
|
|
min, max := c.barrier.KeyLength()
|
|
|
|
max += shamir.ShareOverhead
|
|
|
|
if len(key) < min {
|
|
|
|
return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
|
|
|
|
}
|
|
|
|
if len(key) > max {
|
|
|
|
return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the seal configuration
|
|
|
|
config, err := c.SealConfig()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the barrier is initialized
|
|
|
|
if config == nil {
|
|
|
|
return nil, ErrNotInit
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Ensure we are already unsealed
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return nil, ErrSealed
	}
	if c.standby {
		return nil, ErrStandby
	}

	c.rekeyLock.Lock()
	defer c.rekeyLock.Unlock()

	// Ensure a rekey is in progress
	if c.rekeyConfig == nil {
		return nil, fmt.Errorf("no rekey in progress")
	}

	if nonce != c.rekeyConfig.Nonce {
		return nil, fmt.Errorf("incorrect nonce supplied; nonce for this rekey operation is %s", c.rekeyConfig.Nonce)
	}

	// Check if we already have this piece
	for _, existing := range c.rekeyProgress {
		if bytes.Equal(existing, key) {
			return nil, nil
		}
	}

	// Store this key
	c.rekeyProgress = append(c.rekeyProgress, key)

	// Check if we don't have enough keys to unlock
	if len(c.rekeyProgress) < config.SecretThreshold {
		c.logger.Printf("[DEBUG] core: cannot rekey, have %d of %d keys",
			len(c.rekeyProgress), config.SecretThreshold)
		return nil, nil
	}

	// Recover the master key
	var masterKey []byte
	if config.SecretThreshold == 1 {
		masterKey = c.rekeyProgress[0]
		c.rekeyProgress = nil
	} else {
		masterKey, err = shamir.Combine(c.rekeyProgress)
		c.rekeyProgress = nil
		if err != nil {
			return nil, fmt.Errorf("failed to compute master key: %v", err)
		}
	}

	// Verify the master key
	if err := c.barrier.VerifyMaster(masterKey); err != nil {
		c.logger.Printf("[ERR] core: rekey aborted, master key verification failed: %v", err)
		return nil, err
	}

	// Generate a new master key
	newMasterKey, err := c.barrier.GenerateKey()
	if err != nil {
		c.logger.Printf("[ERR] core: failed to generate master key: %v", err)
		return nil, fmt.Errorf("master key generation failed: %v", err)
	}

	// Return the master key if only a single key part is used
	results := &RekeyResult{
		Backup: c.rekeyConfig.Backup,
	}

	if c.rekeyConfig.SecretShares == 1 {
		results.SecretShares = append(results.SecretShares, newMasterKey)
	} else {
		// Split the master key using the Shamir algorithm
		shares, err := shamir.Split(newMasterKey, c.rekeyConfig.SecretShares, c.rekeyConfig.SecretThreshold)
		if err != nil {
			c.logger.Printf("[ERR] core: failed to generate shares: %v", err)
			return nil, fmt.Errorf("failed to generate shares: %v", err)
		}
		results.SecretShares = shares
	}

	if len(c.rekeyConfig.PGPKeys) > 0 {
		results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(results.SecretShares, c.rekeyConfig.PGPKeys)
		if err != nil {
			return nil, err
		}

		if c.rekeyConfig.Backup {
			backupInfo := map[string]string{}
			for i := 0; i < len(results.PGPFingerprints); i++ {
				encShare := bytes.NewBuffer(results.SecretShares[i])
				backupInfo[results.PGPFingerprints[i]] = hex.EncodeToString(encShare.Bytes())
			}

			backupVals := &RekeyBackup{
				Nonce: c.rekeyConfig.Nonce,
				Keys:  backupInfo,
			}
			buf, err := json.Marshal(backupVals)
			if err != nil {
				c.logger.Printf("[ERR] core: failed to marshal unseal key backup: %v", err)
				return nil, fmt.Errorf("failed to marshal unseal key backup: %v", err)
			}
			pe := &physical.Entry{
				Key:   coreUnsealKeysBackupPath,
				Value: buf,
			}
			if err = c.physical.Put(pe); err != nil {
				c.logger.Printf("[ERR] core: failed to save unseal key backup: %v", err)
				return nil, fmt.Errorf("failed to save unseal key backup: %v", err)
			}
		}
	}

	// Encode the seal configuration
	buf, err := json.Marshal(c.rekeyConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to encode seal configuration: %v", err)
	}

	// Rekey the barrier
	if err := c.barrier.Rekey(newMasterKey); err != nil {
		c.logger.Printf("[ERR] core: failed to rekey barrier: %v", err)
		return nil, fmt.Errorf("failed to rekey barrier: %v", err)
	}
	c.logger.Printf("[INFO] core: security barrier rekeyed (shares: %d, threshold: %d)",
		c.rekeyConfig.SecretShares, c.rekeyConfig.SecretThreshold)

	// Store the seal configuration
	pe := &physical.Entry{
		Key:   coreSealConfigPath,
		Value: buf,
	}
	if err := c.physical.Put(pe); err != nil {
		c.logger.Printf("[ERR] core: failed to update seal configuration: %v", err)
		return nil, fmt.Errorf("failed to update seal configuration: %v", err)
	}

	// Done!
	c.rekeyProgress = nil
	c.rekeyConfig = nil
	return results, nil
}

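// rekeyFlowExample is an illustrative sketch, not part of the original source:
// it shows how the rekey API above is intended to be driven end to end. The
// function name and its parameters are hypothetical. A rekey is initialized,
// the nonce is read back, and unseal key shares are fed in until RekeyUpdate
// reaches the threshold and returns a non-nil RekeyResult.
func rekeyFlowExample(c *Core, newConfig *SealConfig, shares [][]byte) (*RekeyResult, error) {
	if err := c.RekeyInit(newConfig); err != nil {
		return nil, err
	}
	conf, err := c.RekeyConfig()
	if err != nil {
		return nil, err
	}
	for _, share := range shares {
		result, err := c.RekeyUpdate(share, conf.Nonce)
		if err != nil {
			return nil, err
		}
		if result != nil {
			// Threshold met: the barrier has been rekeyed and the new shares
			// (possibly PGP-encrypted) are in result.SecretShares.
			return result, nil
		}
	}
	return nil, fmt.Errorf("not enough key shares supplied to meet the rekey threshold")
}
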
// RekeyCancel is used to cancel an in-progress rekey
func (c *Core) RekeyCancel() error {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return ErrSealed
	}
	if c.standby {
		return ErrStandby
	}

	// Clear any progress or config
	c.rekeyConfig = nil
	c.rekeyProgress = nil
	return nil
}

// RekeyRetrieveBackup is used to retrieve any backed-up PGP-encrypted unseal
// keys
func (c *Core) RekeyRetrieveBackup() (*RekeyBackup, error) {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return nil, ErrSealed
	}
	if c.standby {
		return nil, ErrStandby
	}

	entry, err := c.physical.Get(coreUnsealKeysBackupPath)
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, nil
	}

	ret := &RekeyBackup{}
	err = json.Unmarshal(entry.Value, ret)
	if err != nil {
		return nil, err
	}

	return ret, nil
}

// RekeyDeleteBackup is used to delete any backed-up PGP-encrypted unseal keys
func (c *Core) RekeyDeleteBackup() error {
	c.stateLock.RLock()
	defer c.stateLock.RUnlock()
	if c.sealed {
		return ErrSealed
	}
	if c.standby {
		return ErrStandby
	}

	return c.physical.Delete(coreUnsealKeysBackupPath)
}

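// fetchAndClearRekeyBackupExample is an illustrative sketch, not part of the
// original source: it shows how the two backup helpers above are meant to be
// used together. The function name is hypothetical. The backed-up shares are
// read (keyed by PGP fingerprint, hex-encoded and PGP-encrypted), and the
// backup is deleted from physical storage once the caller has taken custody.
func fetchAndClearRekeyBackupExample(c *Core) (map[string]string, error) {
	backup, err := c.RekeyRetrieveBackup()
	if err != nil {
		return nil, err
	}
	if backup == nil {
		// Nothing was backed up during the last rekey
		return nil, nil
	}
	shares := backup.Keys
	if err := c.RekeyDeleteBackup(); err != nil {
		return nil, err
	}
	return shares, nil
}
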
// postUnseal is invoked after the barrier is unsealed, but before
// allowing any user operations. This allows us to set up any state that
// requires the Vault to be unsealed, such as mount tables, logical backends,
// credential stores, etc.
func (c *Core) postUnseal() (retErr error) {
	defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())
	defer func() {
		if retErr != nil {
			c.preSeal()
		}
	}()
	c.logger.Printf("[INFO] core: post-unseal setup starting")
	if cache, ok := c.physical.(*physical.Cache); ok {
		cache.Purge()
	}
	// HA mode requires us to handle keyring rotation and rekeying
	if c.ha != nil {
		if err := c.checkKeyUpgrades(); err != nil {
			return err
		}
		if err := c.barrier.ReloadMasterKey(); err != nil {
			return err
		}
		if err := c.barrier.ReloadKeyring(); err != nil {
			return err
		}
		if err := c.scheduleUpgradeCleanup(); err != nil {
			return err
		}
	}
	if err := c.loadMounts(); err != nil {
		return err
	}
	if err := c.setupMounts(); err != nil {
		return err
	}
	if err := c.startRollback(); err != nil {
		return err
	}
	if err := c.setupPolicyStore(); err != nil {
		return err
	}
	if err := c.loadCredentials(); err != nil {
		return err
	}
	if err := c.setupCredentials(); err != nil {
		return err
	}
	if err := c.setupExpiration(); err != nil {
		return err
	}
	if err := c.loadAudits(); err != nil {
		return err
	}
	if err := c.setupAudits(); err != nil {
		return err
	}
	c.metricsCh = make(chan struct{})
	go c.emitMetrics(c.metricsCh)
	c.logger.Printf("[INFO] core: post-unseal setup complete")
	return nil
}

// preSeal is invoked before the barrier is sealed, allowing
// for any state teardown required.
func (c *Core) preSeal() error {
	defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
	c.logger.Printf("[INFO] core: pre-seal teardown starting")

	// Clear any rekey progress
	c.rekeyConfig = nil
	c.rekeyProgress = nil

	if c.metricsCh != nil {
		close(c.metricsCh)
		c.metricsCh = nil
	}
	var result error
	if err := c.teardownAudits(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down audits: {{err}}", err))
	}
	if err := c.stopExpiration(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error stopping expiration: {{err}}", err))
	}
	if err := c.teardownCredentials(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down credentials: {{err}}", err))
	}
	if err := c.teardownPolicyStore(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down policy store: {{err}}", err))
	}
	if err := c.stopRollback(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error stopping rollback: {{err}}", err))
	}
	if err := c.unloadMounts(); err != nil {
		result = multierror.Append(result, errwrap.Wrapf("[ERR] error unloading mounts: {{err}}", err))
	}
	if cache, ok := c.physical.(*physical.Cache); ok {
		cache.Purge()
	}
	c.logger.Printf("[INFO] core: pre-seal teardown complete")
	return result
}

// runStandby is a long-running routine that is used when an HA backend
// is enabled. It waits until we are leader and switches this Vault to
// active.
func (c *Core) runStandby(doneCh, stopCh chan struct{}) {
	defer close(doneCh)
	c.logger.Printf("[INFO] core: entering standby mode")

	// Monitor for key rotation
	keyRotateDone := make(chan struct{})
	keyRotateStop := make(chan struct{})
	go c.periodicCheckKeyUpgrade(keyRotateDone, keyRotateStop)
	defer func() {
		close(keyRotateStop)
		<-keyRotateDone
	}()

	for {
		// Check for a shutdown
		select {
		case <-stopCh:
			return
		default:
		}

		// Create a lock
		uuid := uuid.GenerateUUID()
		lock, err := c.ha.LockWith(coreLockPath, uuid)
		if err != nil {
			c.logger.Printf("[ERR] core: failed to create lock: %v", err)
			return
		}

		// Attempt the acquisition
		leaderLostCh := c.acquireLock(lock, stopCh)

		// Bail if we are being shut down
		if leaderLostCh == nil {
			return
		}
		c.logger.Printf("[INFO] core: acquired lock, enabling active operation")

		// Advertise ourself as leader
		if err := c.advertiseLeader(uuid, leaderLostCh); err != nil {
			c.logger.Printf("[ERR] core: leader advertisement setup failed: %v", err)
			lock.Unlock()
			continue
		}

		// Attempt the post-unseal process
		c.stateLock.Lock()
		err = c.postUnseal()
		if err == nil {
			c.standby = false
		}
		c.stateLock.Unlock()

		// Handle a failure to unseal
		if err != nil {
			c.logger.Printf("[ERR] core: post-unseal setup failed: %v", err)
			lock.Unlock()
			continue
		}

		// Monitor a loss of leadership
		select {
		case <-leaderLostCh:
			c.logger.Printf("[WARN] core: leadership lost, stopping active operation")
		case <-stopCh:
			c.logger.Printf("[WARN] core: stopping active operation")
		}

		// Clear ourself as leader
		if err := c.clearLeader(uuid); err != nil {
			c.logger.Printf("[ERR] core: clearing leader advertisement failed: %v", err)
		}

		// Attempt the pre-seal process
		c.stateLock.Lock()
		c.standby = true
		preSealErr := c.preSeal()
		c.stateLock.Unlock()

		// Give up leadership
		lock.Unlock()

		// Check for a failure to prepare to seal
		if preSealErr != nil {
			c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", preSealErr)
		}
	}
}

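// startStandbyExample is an illustrative sketch, not part of the original
// source: it shows the done/stop channel contract that runStandby above
// expects from its caller. The function name is hypothetical. The caller owns
// stopCh and closes it to request shutdown, then waits on doneCh until the
// standby loop has released its lock and finished cleanup.
func startStandbyExample(c *Core) (shutdown func()) {
	doneCh := make(chan struct{})
	stopCh := make(chan struct{})
	go c.runStandby(doneCh, stopCh)
	return func() {
		close(stopCh) // ask the standby loop to exit
		<-doneCh      // runStandby closes doneCh on return
	}
}
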
// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
	defer close(doneCh)
	for {
		select {
		case <-time.After(keyRotateCheckInterval):
			// Only check if we are a standby
			c.stateLock.RLock()
			standby := c.standby
			c.stateLock.RUnlock()
			if !standby {
				continue
			}

			if err := c.checkKeyUpgrades(); err != nil {
				c.logger.Printf("[ERR] core: key rotation periodic upgrade check failed: %v", err)
			}
		case <-stopCh:
			return
		}
	}
}

// checkKeyUpgrades is used to check if there have been any key rotations
// and if there is a chain of upgrades available
func (c *Core) checkKeyUpgrades() error {
	for {
		// Check for an upgrade
		didUpgrade, newTerm, err := c.barrier.CheckUpgrade()
		if err != nil {
			return err
		}

		// Nothing to do if no upgrade
		if !didUpgrade {
			break
		}
		c.logger.Printf("[INFO] core: upgraded to key term %d", newTerm)
	}
	return nil
}

// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
// are cleaned up in a timely manner if a leader failover takes place
func (c *Core) scheduleUpgradeCleanup() error {
	// List the upgrades
	upgrades, err := c.barrier.List(keyringUpgradePrefix)
	if err != nil {
		return fmt.Errorf("failed to list upgrades: %v", err)
	}

	// Nothing to do if no upgrades
	if len(upgrades) == 0 {
		return nil
	}

	// Schedule cleanup for all of them
	time.AfterFunc(keyRotateGracePeriod, func() {
		for _, upgrade := range upgrades {
			path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
			if err := c.barrier.Delete(path); err != nil {
				c.logger.Printf("[ERR] core: failed to cleanup upgrade: %s", path)
			}
		}
	})
	return nil
}

// acquireLock blocks until the lock is acquired, returning the leaderLostCh
func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
	for {
		// Attempt lock acquisition
		leaderLostCh, err := lock.Lock(stopCh)
		if err == nil {
			return leaderLostCh
		}

		// Retry the acquisition
		c.logger.Printf("[ERR] core: failed to acquire lock: %v", err)
		select {
		case <-time.After(lockRetryInterval):
		case <-stopCh:
			return nil
		}
	}
}

// advertiseLeader is used to advertise the current node as leader
func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error {
	go c.cleanLeaderPrefix(uuid, leaderLostCh)
	ent := &Entry{
		Key:   coreLeaderPrefix + uuid,
		Value: []byte(c.advertiseAddr),
	}
	return c.barrier.Put(ent)
}

// cleanLeaderPrefix removes stale leader entries left under coreLeaderPrefix
// by previous leaders, sparing our own entry and pacing deletions by
// leaderPrefixCleanDelay so the backend is not slammed.
func (c *Core) cleanLeaderPrefix(uuid string, leaderLostCh <-chan struct{}) {
	keys, err := c.barrier.List(coreLeaderPrefix)
	if err != nil {
		c.logger.Printf("[ERR] core: failed to list entries in core/leader: %v", err)
		return
	}
	for len(keys) > 0 {
		select {
		case <-time.After(leaderPrefixCleanDelay):
			if keys[0] != uuid {
				c.barrier.Delete(coreLeaderPrefix + keys[0])
			}
			keys = keys[1:]
		case <-leaderLostCh:
			return
		}
	}
}

// clearLeader is used to clear our leadership entry
func (c *Core) clearLeader(uuid string) error {
	key := coreLeaderPrefix + uuid
	return c.barrier.Delete(key)
}

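// currentLeaderAddrExample is an illustrative sketch, not part of the original
// source: it shows how the advertisement written by advertiseLeader above can
// be read back. The function name is hypothetical, and it assumes the barrier
// exposes a Get counterpart to the Put used above, returning (*Entry, error).
func currentLeaderAddrExample(c *Core) (string, error) {
	keys, err := c.barrier.List(coreLeaderPrefix)
	if err != nil {
		return "", err
	}
	if len(keys) == 0 {
		return "", nil // no leader currently advertised
	}
	entry, err := c.barrier.Get(coreLeaderPrefix + keys[0])
	if err != nil {
		return "", err
	}
	if entry == nil {
		return "", nil
	}
	return string(entry.Value), nil
}
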
// emitMetrics is used to periodically expose metrics while running
func (c *Core) emitMetrics(stopCh chan struct{}) {
	for {
		select {
		case <-time.After(time.Second):
			c.metricsMutex.Lock()
			if c.expiration != nil {
				c.expiration.emitMetrics()
			}
			c.metricsMutex.Unlock()
		case <-stopCh:
			return
		}
	}
}