open-vault/http/handler.go

package http

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"io/ioutil"
"mime"
"net"
"net/http"
"net/http/pprof"
"net/textproto"
"net/url"
"os"
"regexp"
"strings"
"time"
"github.com/NYTimes/gziphandler"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/pathmanager"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
// WrapFormatHeaderName is the name of the header containing the format to
// wrap in; has no effect if the wrap TTL is not set
WrapFormatHeaderName = "X-Vault-Wrap-Format"
// NoRequestForwardingHeaderName is the name of the header telling Vault
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
VaultIndexHeaderName = "X-Vault-Index"
VaultInconsistentHeaderName = "X-Vault-Inconsistent"
VaultForwardHeaderName = "X-Vault-Forward"
VaultInconsistentForward = "forward-active-node"
VaultInconsistentFail = "fail"
// DefaultMaxRequestSize is the default maximum accepted request size. This
// is to prevent a denial of service attack where no Content-Length is
// provided and the server is fed ever more data until it exhausts memory.
// Can be overridden per listener.
DefaultMaxRequestSize = 32 * 1024 * 1024
)
var (
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
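// alwaysRedirectPaths holds paths (currently the raft snapshot
// install/download endpoints) that are always redirected to the active
// node rather than forwarded.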
alwaysRedirectPaths = pathmanager.New()
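// injectDataIntoTopRoutes lists the sys routes that Handler registers
// with handleLogicalWithInjector below.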
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
"/v1/sys/audit-hash/",
"/v1/sys/auth",
"/v1/sys/auth/",
"/v1/sys/config/cors",
"/v1/sys/config/auditing/request-headers/",
"/v1/sys/config/auditing/request-headers",
"/v1/sys/capabilities",
"/v1/sys/capabilities-accessor",
"/v1/sys/capabilities-self",
"/v1/sys/ha-status",
"/v1/sys/key-status",
"/v1/sys/mounts",
"/v1/sys/mounts/",
"/v1/sys/policy",
"/v1/sys/policy/",
"/v1/sys/rekey/backup",
"/v1/sys/rekey/recovery-key-backup",
"/v1/sys/remount",
"/v1/sys/rotate",
"/v1/sys/wrapping/wrap",
}
oidcProtectedPathRegex = regexp.MustCompile(`^identity/oidc/provider/\w(([\w-.]+)?\w)?/userinfo$`)
)
func init() {
alwaysRedirectPaths.AddPaths([]string{
"sys/storage/raft/snapshot",
"sys/storage/raft/snapshot-force",
"!sys/storage/raft/snapshot-auto/config",
})
}
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
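//
// A minimal mounting sketch (outerMux and core are illustrative names,
// assuming an already-initialized *vault.Core):
//
//	apiHandler := Handler(&vault.HandlerProperties{Core: core})
//	outerMux := http.NewServeMux()
//	outerMux.Handle("/", apiHandler)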
func Handler(props *vault.HandlerProperties) http.Handler {
core := props.Core
// Create the muxer to handle the actual endpoints
mux := http.NewServeMux()
switch {
case props.RecoveryMode:
raw := vault.NewRawBackend(core)
strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken)
mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken))
mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy))
mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy))
default:
// Handle non-forwarded paths
mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core))
mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core))
mux.Handle("/v1/sys/init", handleSysInit(core))
mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core))
mux.Handle("/v1/sys/seal", handleSysSeal(core))
mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core)))
mux.Handle("/v1/sys/unseal", handleSysUnseal(core))
mux.Handle("/v1/sys/leader", handleSysLeader(core))
mux.Handle("/v1/sys/health", handleSysHealth(core))
mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core))
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core,
handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy))))
mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false)))
mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false)))
mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false)))
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core))
mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core))
mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))
} else {
mux.Handle("/ui/", handleUIHeaders(core, handleUIStub()))
}
mux.Handle("/ui", handleUIRedirect())
mux.Handle("/", handleUIRedirect())
}
// Register metrics path without authentication if enabled
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
}
if props.ListenerConfig != nil && props.ListenerConfig.Profiling.UnauthenticatedPProfAccess {
for _, name := range []string{"goroutine", "threadcreate", "heap", "allocs", "block", "mutex"} {
mux.Handle("/v1/sys/pprof/"+name, pprof.Handler(name))
}
mux.Handle("/v1/sys/pprof/", http.HandlerFunc(pprof.Index))
mux.Handle("/v1/sys/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Handle("/v1/sys/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/v1/sys/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/v1/sys/pprof/trace", http.HandlerFunc(pprof.Trace))
} else {
mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
}
if props.ListenerConfig != nil && props.ListenerConfig.InFlightRequestLogging.UnauthenticatedInFlightAccess {
mux.Handle("/v1/sys/in-flight-req", handleUnAuthenticatedInFlightRequest(core))
} else {
mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core))
}
additionalRoutes(mux, core)
}
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
printablePathCheckHandler := genericWrappedHandler
if !props.DisablePrintableCheck {
printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil)
}
return printablePathCheckHandler
}
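
// copyResponseWriter is an http.ResponseWriter that records the status
// code and duplicates the response body into a buffer so the response
// can be audited after the wrapped handler has run.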
type copyResponseWriter struct {
wrapped http.ResponseWriter
statusCode int
body *bytes.Buffer
}
// newCopyResponseWriter returns an initialized copyResponseWriter
func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
w := &copyResponseWriter{
wrapped: wrapped,
body: new(bytes.Buffer),
statusCode: 200,
}
return w
}
func (w *copyResponseWriter) Header() http.Header {
return w.wrapped.Header()
}
func (w *copyResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return w.wrapped.Write(buf)
}
func (w *copyResponseWriter) WriteHeader(code int) {
w.statusCode = code
w.wrapped.WriteHeader(code)
}
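
// handleAuditNonLogical wraps handlers for endpoints that bypass the
// logical layer (such as generate-root), emitting audit entries for
// both the request and the captured response.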
func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
origBody := new(bytes.Buffer)
reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
r.Body = reader
req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
if err != nil || status != 0 {
respondError(w, status, err)
return
}
if origBody != nil {
r.Body = ioutil.NopCloser(origBody)
}
input := &logical.LogInput{
Request: req,
}
err = core.AuditLogger().AuditRequest(r.Context(), input)
if err != nil {
respondError(w, status, err)
return
}
cw := newCopyResponseWriter(w)
h.ServeHTTP(cw, r)
data := make(map[string]interface{})
err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
if err != nil {
// best effort, ignore
}
httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
err = core.AuditLogger().AuditResponse(r.Context(), input)
if err != nil {
respondError(w, status, err)
}
return
})
}
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
// Swallow this error since we don't want to pollute the logs and we also don't want to
// return an HTTP error here. This information is best effort.
hostname, _ := os.Hostname()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// This block needs to be here so that upon sending SIGHUP, custom response
// headers are also reloaded into the handlers.
var customHeaders map[string][]*logical.CustomHeader
if props.ListenerConfig != nil {
la := props.ListenerConfig.Address
listenerCustomHeaders := core.GetListenerCustomResponseHeaders(la)
if listenerCustomHeaders != nil {
customHeaders = listenerCustomHeaders.StatusCodeHeaderMap
}
}
// saving start time for the in-flight requests
inFlightReqStartTime := time.Now()
nw := logical.NewStatusHeaderResponseWriter(w, customHeaders)
// Set the Cache-Control header for all the responses returned
// by Vault
nw.Header().Set("Cache-Control", "no-store")
// Start with the request context
ctx := r.Context()
var cancelFunc context.CancelFunc
// Add our timeout, but not for the monitor endpoint, as it's streaming
if strings.HasSuffix(r.URL.Path, "sys/monitor") {
ctx, cancelFunc = context.WithCancel(ctx)
} else {
ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration)
}
// if maxRequestSize < 0, no need to set context value
// Add a size limiter if desired
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace))
// Set some response headers with raft node id (if applicable) and hostname, if available
if core.RaftNodeIDHeaderEnabled() {
nodeID := core.GetRaftNodeID()
if nodeID != "" {
nw.Header().Set("X-Vault-Raft-Node-ID", nodeID)
}
}
if core.HostnameHeaderEnabled() && hostname != "" {
nw.Header().Set("X-Vault-Hostname", hostname)
}
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(nw, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/":
default:
respondError(nw, http.StatusNotFound, nil)
cancelFunc()
return
}
// A UUID is normally generated later, when the logical request is
// built. Here we generate one early so the in-flight request can be
// tracked, and so its data can later be updated with the client ID.
inFlightReqID, err := uuid.GenerateUUID()
if err != nil {
respondError(nw, http.StatusInternalServerError, fmt.Errorf("failed to generate an identifier for the in-flight request"))
}
// adding an entry to the context to enable updating in-flight
// data with ClientID in the logical layer
r = r.WithContext(context.WithValue(r.Context(), logical.CtxKeyInFlightRequestID{}, inFlightReqID))
// extracting the client address to be included in the in-flight request
var clientAddr string
headers := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if len(headers) == 0 {
clientAddr = r.RemoteAddr
} else {
clientAddr = headers[0]
}
// getting the request method
requestMethod := r.Method
// Storing the in-flight requests. Path should include namespace as well
core.StoreInFlightReqData(
inFlightReqID,
vault.InFlightReqData{
StartTime: inFlightReqStartTime,
ReqPath: r.URL.Path,
ClientRemoteAddr: clientAddr,
Method: requestMethod,
})
defer func() {
// Not expecting this to fail, so skipping the assertion check
core.FinalizeInFlightReqData(inFlightReqID, nw.StatusCode)
}()
// Setting the namespace in the header to be included in the error message
ns := r.Header.Get(consts.NamespaceHeaderName)
if ns != "" {
nw.Header().Set(consts.NamespaceHeaderName, ns)
}
h.ServeHTTP(nw, r)
cancelFunc()
return
})
}
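
// WrapForwardedForHandler rewrites the request's RemoteAddr based on the
// X-Forwarded-For header, honoring the listener's x_forwarded_for_*
// settings for authorized proxies, hop skipping, and rejection behavior.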
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
return
}
host, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// If not rejecting treat it like we just don't have a valid
// header because we can't do a comparison against an address we
// can't understand
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("error parsing client hostport: %w", err))
return
}
addr, err := sockaddr.NewIPAddr(host)
if err != nil {
// We treat this the same as the case above
if !rejectNotPresent {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("error parsing client address: %w", err))
return
}
var found bool
for _, authz := range authorizedAddrs {
if authz.Contains(addr) {
found = true
break
}
}
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
return
}
// At this point we have at least one value and it's authorized
// Split comma-separated values, which are common. This brings it in
// line with the multiple-header case.
var acc []string
for _, header := range headers {
vals := strings.Split(header, ",")
for _, v := range vals {
acc = append(acc, strings.TrimSpace(v))
}
}
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we
// could simply not trust the value. Denying the request is
// "safer" since if this logic is configured at all there may
// be an assumption it can always be trusted. Given that we can
// deny accepting the request at all if it's not from an
// authorized address, if we're at this point the address is
// authorized (or we've turned off explicit rejection) and we
// should assume that what comes in should be properly
// formatted.
respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
return
}
r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
h.ServeHTTP(w, r)
return
})
}
// stripPrefix is a helper to strip a prefix from the path. It will
// return false from the second return value if the prefix doesn't exist.
func stripPrefix(prefix, path string) (string, bool) {
if !strings.HasPrefix(path, prefix) {
return "", false
}
path = path[len(prefix):]
if path == "" {
return "", false
}
return path, true
}
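
// handleUIHeaders sets any user-configured UI response headers before
// delegating to the wrapped handler.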
func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
header := w.Header()
userHeaders, err := core.UIHeaders()
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
if userHeaders != nil {
for k := range userHeaders {
v := userHeaders.Get(k)
header.Set(k, v)
}
}
h.ServeHTTP(w, req)
})
}
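
// handleUI trims trailing slashes from UI asset paths so the file
// server does not issue its own redirect.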
func handleUI(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// The fileserver handler strips trailing slashes and does a redirect.
// We don't want the redirect to happen so we preemptively trim the slash
// here.
req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
h.ServeHTTP(w, req)
return
})
}
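
// handleUIStub serves a static placeholder page for binaries that were
// built without the UI assets.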
func handleUIStub() http.Handler {
stubHTML := `
<!DOCTYPE html>
<html>
<style>
body {
color: #1F2124;
font-family: system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
.wrapper {
display: flex;
justify-content: center;
align-items: center;
height: 500px;
}
.content ul {
line-height: 1.5;
}
a {
color: #1563ff;
text-decoration: none;
}
.header {
display: flex;
color: #6a7786;
align-items: center;
}
.header svg {
padding-right: 12px;
}
.alert {
transform: scale(0.07);
fill: #6a7786;
}
h1 {
font-weight: 500;
}
p {
margin-top: 0px;
}
</style>
<div class="wrapper">
<div class="content">
<div class="header">
<svg width="36px" height="36px" viewBox="0 0 36 36" xmlns="http://www.w3.org/2000/svg">
<path class="alert" d="M476.7 422.2L270.1 72.7c-2.9-5-8.3-8.7-14.1-8.7-5.9 0-11.3 3.7-14.1 8.7L35.3 422.2c-2.8 5-4.8 13-1.9 17.9 2.9 4.9 8.2 7.9 14 7.9h417.1c5.8 0 11.1-3 14-7.9 3-4.9 1-13-1.8-17.9zM288 400h-64v-48h64v48zm0-80h-64V176h64v144z"/>
</svg>
<h1>Vault UI is not available in this binary.</h1>
</div>
<p>To get Vault UI do one of the following:</p>
<ul>
<li><a href="https://www.vaultproject.io/downloads.html">Download an official release</a></li>
<li>Run <code>make bin</code> to create your own release binaries.
<li>Run <code>make dev-ui</code> to create a development binary with the UI.
</ul>
</div>
</div>
</html>
`
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(stubHTML))
})
}
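
// handleUIRedirect issues a temporary redirect from the bare UI paths
// to /ui/.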
func handleUIRedirect() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, "/ui/", 307)
return
})
}
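
// UIAssetWrapper wraps the embedded UI filesystem so that requests for
// unknown paths fall back to index.html, letting the client-side router
// handle them.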
type UIAssetWrapper struct {
FileSystem http.FileSystem
}
func (fsw *UIAssetWrapper) Open(name string) (http.File, error) {
file, err := fsw.FileSystem.Open(name)
if err == nil {
return file, nil
}
// serve index.html instead of 404ing
if errors.Is(err, fs.ErrNotExist) {
file, err := fsw.FileSystem.Open("index.html")
return file, err
}
return nil, err
}
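
// parseQuery converts URL query parameters into a request data map,
// skipping the reserved "help" parameter; it returns nil when nothing
// remains.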
func parseQuery(values url.Values) map[string]interface{} {
data := map[string]interface{}{}
for k, v := range values {
// Skip the help key as this is a reserved parameter
if k == "help" {
continue
}
switch {
case len(v) == 0:
case len(v) == 1:
data[k] = v[0]
default:
data[k] = v
}
}
if len(data) > 0 {
return data
}
return nil
}
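
// parseJSONRequest decodes the JSON request body into out, limiting
// reads to the listener's configured maximum request size. On
// performance standbys a copy of the raw body is also returned so the
// request can be forwarded.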
func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) {
// Limit the maximum number of bytes to MaxRequestSize to protect
// against an indefinite amount of data being read.
reader := r.Body
ctx := r.Context()
maxRequestSize := ctx.Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
// MaxBytesReader won't do all the internal stuff it must unless it's
// given a ResponseWriter that implements the internal http interface
// requestTooLarger. So we let it have access to the underlying
// ResponseWriter.
inw := w
if myw, ok := inw.(logical.WrappingResponseWriter); ok {
inw = myw.Wrapped()
}
reader = http.MaxBytesReader(inw, r.Body, max)
}
}
var origBody io.ReadWriter
if perfStandby {
// Since we're checking PerfStandby here we key on origBody being nil
// or not later, so we need to always allocate so it's non-nil
origBody = new(bytes.Buffer)
reader = ioutil.NopCloser(io.TeeReader(reader, origBody))
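// The tee'd copy preserves the raw request bytes so the caller can replay
// them if the request later has to be forwarded to the active node.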
}
err := jsonutil.DecodeJSONFromReader(reader, out)
if err != nil && err != io.EOF {
return nil, fmt.Errorf("failed to parse JSON input: %w", err)
}
if origBody != nil {
return ioutil.NopCloser(origBody), err
}
return nil, err
}
// parseFormRequest parses values from a form POST.
//
// A nil map will be returned if the form data is empty or invalid.
func parseFormRequest(r *http.Request) (map[string]interface{}, error) {
maxRequestSize := r.Context().Value("max_request_size")
if maxRequestSize != nil {
max, ok := maxRequestSize.(int64)
if !ok {
return nil, errors.New("could not parse max_request_size from request context")
}
if max > 0 {
r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max))
}
}
if err := r.ParseForm(); err != nil {
return nil, err
}
var data map[string]interface{}
if len(r.PostForm) != 0 {
data = make(map[string]interface{}, len(r.PostForm))
for k, v := range r.PostForm {
switch len(v) {
case 0:
case 1:
data[k] = v[0]
default:
// Almost anywhere taking in a string list can take in comma
// separated values, and really this is super niche anyways
data[k] = strings.Join(v, ",")
}
}
}
return data, nil
}
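// Illustrative sketch, hypothetical request URL and values: a form POST such
// as "ttl=5m&policies=a&policies=b" parsed via parseFormRequest yields
// {"ttl": "5m", "policies": "a,b"}, since repeated form keys are joined with
// commas rather than kept as a list.
func exampleParseFormUsage() (map[string]interface{}, error) {
body := strings.NewReader("ttl=5m&policies=a&policies=b")
req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8200/v1/auth/approle/login", body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return parseFormRequest(req)
}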
// forwardBasedOnHeaders returns true if the request headers specify that
// we should forward to the active node - either unconditionally or because
// a specified state isn't present locally.
func forwardBasedOnHeaders(core *vault.Core, r *http.Request) (bool, error) {
rawForward := r.Header.Get(VaultForwardHeaderName)
if rawForward != "" {
if !core.AllowForwardingViaHeader() {
return false, fmt.Errorf("forwarding via header %s disabled in configuration", VaultForwardHeaderName)
}
if rawForward == "active-node" {
return true, nil
}
return false, nil
}
rawInconsistent := r.Header.Get(VaultInconsistentHeaderName)
if rawInconsistent == "" {
return false, nil
}
switch rawInconsistent {
case VaultInconsistentForward:
if !core.AllowForwardingViaHeader() {
return false, fmt.Errorf("forwarding via header %s=%s disabled in configuration",
VaultInconsistentHeaderName, VaultInconsistentForward)
}
default:
return false, nil
}
return core.MissingRequiredState(r.Header.Values(VaultIndexHeaderName), core.PerfStandby()), nil
}
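// Illustrative sketch, hypothetical header values: the combinations a client
// might send. X-Vault-Forward: active-node asks for unconditional forwarding
// (when allowed by configuration), while X-Vault-Inconsistent:
// forward-active-node asks for forwarding only if the states listed in
// X-Vault-Index are not yet present locally.
func exampleForwardingHeaders(r *http.Request) {
// Unconditional forwarding to the active node.
r.Header.Set(VaultForwardHeaderName, "active-node")
// Or: forward only when the required state is missing locally.
r.Header.Del(VaultForwardHeaderName)
r.Header.Set(VaultInconsistentHeaderName, VaultInconsistentForward)
r.Header.Add(VaultIndexHeaderName, "opaque-state-token")
}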
// handleRequestForwarding determines whether to forward a request or not,
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Note if the client requested forwarding
shouldForward, err := forwardBasedOnHeaders(core, r)
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
// If we are a performance standby we can maybe handle the request.
if core.PerfStandby() && !shouldForward {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
switch {
case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path):
handler.ServeHTTP(w, r)
return
case strings.HasPrefix(path, "auth/token/create/"):
isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path)
if err == nil && isBatch {
handler.ServeHTTP(w, r)
return
}
}
}
// Note: in an HA setup, this call will also ensure that connections to
// the leader are set up, as that happens once the advertised cluster
// values are read during this function
isLeader, leaderAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve request normally
handler.ServeHTTP(w, r)
return
}
// Some internal error occurred
respondError(w, http.StatusInternalServerError, err)
return
}
if isLeader {
// No forwarding needed, we're leader
handler.ServeHTTP(w, r)
return
}
if leaderAddr == "" {
respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found"))
return
}
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if alwaysRedirectPaths.HasPath(path) {
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
} else {
core.Logger().Error("forward request error", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false, false
}
if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) {
return nil, false, true
}
if resp != nil && len(resp.Headers) > 0 {
// Set this here so it will take effect regardless of any other type of
// response processing
header := w.Header()
for k, v := range resp.Headers {
for _, h := range v {
header.Add(k, h)
}
}
switch {
case resp.Secret != nil,
resp.Auth != nil,
len(resp.Data) > 0,
resp.Redirect != "",
len(resp.Warnings) > 0,
resp.WrapInfo != nil:
// Nothing, resp has data
default:
// We have an otherwise totally empty response except for headers,
// so nil out the response now that the headers are written out
resp = nil
}
}
// If vault's core has already written to the response writer do not add any
// additional output. Headers have already been sent. If the response writer
// is set but has not been written to it likely means there was some kind of
// error
if r.ResponseWriter != nil && r.ResponseWriter.Written() {
return nil, true, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false, false
}
return resp, true, false
}
// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
// Request the leader address
_, redirectAddr, _, err := core.Leader()
if err != nil {
if err == vault.ErrHANotEnabled {
// Standalone node, serve 503
err = errors.New("node is not active")
respondError(w, http.StatusServiceUnavailable, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
// If there is no leader, generate a 503 error
if redirectAddr == "" {
err = errors.New("no active Vault instance found")
respondError(w, http.StatusServiceUnavailable, err)
return
}
// Parse the redirect location
redirectURL, err := url.Parse(redirectAddr)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
// Generate a redirect URL
finalURL := url.URL{
Scheme: redirectURL.Scheme,
Host: redirectURL.Host,
Path: reqURL.Path,
RawQuery: reqURL.RawQuery,
}
// Ensure there is a scheme, default to https
if finalURL.Scheme == "" {
finalURL.Scheme = "https"
}
// If we have an address, redirect! We use a 307 code
// because we don't actually know if it's permanent and
// the request method should be preserved.
w.Header().Set("Location", finalURL.String())
w.WriteHeader(307)
}
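// Illustrative sketch, hypothetical leader address: how respondStandby
// assembles the redirect target from the leader's advertised address plus the
// original request path and query.
func exampleStandbyRedirectURL(reqURL *url.URL) string {
redirect := url.URL{
Scheme:   "https",
Host:     "vault-active.example.com:8200",
Path:     reqURL.Path,     // e.g. /v1/secret/data/foo
RawQuery: reqURL.RawQuery, // e.g. version=2
}
return redirect.String()
}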
// getTokenFromReq parses the headers of the incoming request to extract a
// token if present. It accepts the Authorization Bearer scheme (RFC 6750) and
// the X-Vault-Token header.
// Returns true if the token was sourced from a Bearer header.
func getTokenFromReq(r *http.Request) (string, bool) {
if token := r.Header.Get(consts.AuthHeaderName); token != "" {
return token, false
}
if headers, ok := r.Header["Authorization"]; ok {
// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
// If string does not start by 'Bearer ', it is not one we would use,
// but might be used by plugins
for _, v := range headers {
if !strings.HasPrefix(v, "Bearer ") {
continue
}
return strings.TrimSpace(v[7:]), true
}
}
return "", false
}
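// Illustrative sketch, hypothetical token value: the two ways a client token
// can be supplied. X-Vault-Token takes precedence over Authorization: Bearer.
func exampleTokenHeaders(r *http.Request) (string, bool) {
r.Header.Set(consts.AuthHeaderName, "s.example123") // X-Vault-Token
// Alternatively: r.Header.Set("Authorization", "Bearer s.example123")
return getTokenFromReq(r) // ("s.example123", false)
}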
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(r *http.Request, req *logical.Request) {
// Attach the header value if we have it
token, fromAuthzHeader := getTokenFromReq(r)
if token != "" {
req.ClientToken = token
req.ClientTokenSource = logical.ClientTokenFromVaultHeader
if fromAuthzHeader {
req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
}
}
}
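// requestPolicyOverride sets req.PolicyOverride when the
// X-Vault-Policy-Override header is present and parses as a boolean.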
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
// First try for the header value
wrapTTL := r.Header.Get(WrapTTLHeaderName)
if wrapTTL == "" {
return req, nil
}
// If it has an allowed suffix parse as a duration string
dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
if int64(dur) < 0 {
return req, fmt.Errorf("requested wrap ttl cannot be negative")
}
req.WrapInfo = &logical.RequestWrapInfo{
TTL: dur,
}
wrapFormat := r.Header.Get(WrapFormatHeaderName)
switch wrapFormat {
case "jwt":
req.WrapInfo.Format = "jwt"
}
return req, nil
}
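// Illustrative sketch, hypothetical values: requesting response wrapping via
// headers. The TTL accepts an integer number of seconds or a duration string,
// and the optional format header may ask for a JWT-wrapped token.
func exampleWrapHeaders(r *http.Request) {
r.Header.Set(WrapTTLHeaderName, "15m")
r.Header.Set(WrapFormatHeaderName, "jwt")
}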
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
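// Illustrative sketch, hypothetical method names and credentials: each
// X-Vault-MFA value is "method_name:credential"; a bare method name marks the
// method with an empty credential list.
func exampleMFAHeader(req *logical.Request) error {
if req.Headers == nil {
req.Headers = make(map[string][]string)
}
req.Headers[canonicalMFAHeaderName] = []string{"my_totp:695452", "my_push_method"}
// After parsing, req.MFACreds is {"my_totp": ["695452"], "my_push_method": []}.
return parseMFAHeader(req)
}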
// isForm tries to determine whether the request should be
// processed as a form or as JSON.
//
// Virtually all existing use cases have assumed processing as JSON,
// and there has not been a Content-Type requirement in the API. In order to
// maintain backwards compatibility, this will err on the side of JSON.
// The request will be considered a form only if:
//
// 1. The content type is "application/x-www-form-urlencoded"
// 2. The start of the request doesn't look like JSON. For this test we
// expect the body to begin with { or [, ignoring leading whitespace.
func isForm(head []byte, contentType string) bool {
contentType, _, err := mime.ParseMediaType(contentType)
if err != nil || contentType != "application/x-www-form-urlencoded" {
return false
}
// Look for the start of JSON or not-JSON, skipping any insignificant
// whitespace (per https://tools.ietf.org/html/rfc7159#section-2).
for _, c := range head {
switch c {
case ' ', '\t', '\n', '\r':
continue
case '[', '{': // JSON
return false
default: // not JSON
return true
}
}
return true
}
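// Illustrative sketch: isForm treats the body as a form only when the content
// type is x-www-form-urlencoded and the body does not start with { or [, so
// JSON accidentally sent with a form content type is still parsed as JSON.
func exampleIsForm() bool {
ct := "application/x-www-form-urlencoded; charset=utf-8"
// isForm([]byte(`{"ttl":"5m"}`), ct) would return false.
return isForm([]byte("ttl=5m&policies=a"), ct) // true
}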
func respondError(w http.ResponseWriter, status int, err error) {
logical.RespondError(w, status, err)
}
func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool {
statusCode, newErr := logical.RespondErrorCommon(req, resp, err)
if newErr == nil && statusCode == 0 {
return false
}
// If ErrPermissionDenied occurs for OIDC protected resources (e.g., userinfo),
// then respond with a JSON error format that complies with the specification.
// This prevents the JSON error format from changing to a Vault-y format (i.e.,
// the format that results from respondError) after an OIDC access token expires.
if oidcPermissionDenied(req.Path, err) {
respondOIDCPermissionDenied(w)
return true
}
respondError(w, statusCode, newErr)
return true
}
func respondOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
if body == nil {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
enc.Encode(body)
}
}
// oidcPermissionDenied returns true if the given path matches the
// UserInfo Endpoint published by Vault OIDC providers and the given
// error is a logical.ErrPermissionDenied.
func oidcPermissionDenied(path string, err error) bool {
return errwrap.Contains(err, logical.ErrPermissionDenied.Error()) &&
oidcProtectedPathRegex.MatchString(path)
}
// respondOIDCPermissionDenied writes a response to the given w for
// permission denied errors (expired token) on resources protected
// by OIDC access tokens. Currently, the UserInfo Endpoint is the only
// protected resource. See the following specifications for details:
// - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoError
// - https://datatracker.ietf.org/doc/html/rfc6750#section-3.1
func respondOIDCPermissionDenied(w http.ResponseWriter) {
errorCode := "invalid_token"
errorDescription := logical.ErrPermissionDenied.Error()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Bearer error=%q,error_description=%q",
errorCode, errorDescription))
w.WriteHeader(http.StatusUnauthorized)
var oidcResponse struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
}
oidcResponse.Error = errorCode
oidcResponse.ErrorDescription = errorDescription
enc := json.NewEncoder(w)
enc.Encode(oidcResponse)
}
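// Illustrative response, assuming logical.ErrPermissionDenied's message is
// "permission denied":
//
//	HTTP/1.1 401 Unauthorized
//	Content-Type: application/json
//	WWW-Authenticate: Bearer error="invalid_token",error_description="permission denied"
//
//	{"error":"invalid_token","error_description":"permission denied"}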