From 325e822e8aa5f7d73d164232399b42b44b9a04bf Mon Sep 17 00:00:00 2001 From: hc-github-team-secure-vault-core <82990506+hc-github-team-secure-vault-core@users.noreply.github.com> Date: Mon, 16 Oct 2023 07:38:11 -0400 Subject: [PATCH] VAULT-20476: vault.NewCore refactor. (#23644) (#23659) * NewCore tech debt refactoring * addExtraCredentialBackends * singletonMounts => mountTypeToken instead of 'token' * NewCore tests support ent backend addition * PR feedback * reorder method calls * mounthPath___ standardization * Try to be more explicit about the min number of backends * Include cluster listener * explicit declaration of events before assignment * Removed nil checking * resolve conflicts Co-authored-by: Peter Wilson --- vault/auth.go | 8 +- vault/core.go | 233 ++++++++++++++++++----------- vault/core_test.go | 304 ++++++++++++++++++++++++++++++++++++++ vault/core_util.go | 6 +- vault/logical_system.go | 2 +- vault/mount.go | 64 ++++---- vault/mount_util.go | 2 +- vault/plugin_reload.go | 4 +- vault/router.go | 8 +- vault/token_store.go | 4 +- vault/token_store_test.go | 4 +- 11 files changed, 507 insertions(+), 132 deletions(-) diff --git a/vault/auth.go b/vault/auth.go index e5297f148..cef9e2ed8 100644 --- a/vault/auth.go +++ b/vault/auth.go @@ -116,7 +116,7 @@ func (c *Core) enableCredentialInternal(ctx context.Context, entry *MountEntry, } // Ensure the token backend is a singleton - if entry.Type == "token" { + if entry.Type == mountTypeToken { return fmt.Errorf("token credential backend cannot be instantiated") } @@ -883,7 +883,7 @@ func (c *Core) setupCredentials(ctx context.Context) error { } // Check if this is the token store - if entry.Type == "token" { + if entry.Type == mountTypeToken { c.tokenStore = backend.(*TokenStore) // At some point when this isn't beta we may persist this but for @@ -893,7 +893,7 @@ func (c *Core) setupCredentials(ctx context.Context) error { // this is loaded *after* the normal mounts, including cubbyhole c.router.tokenStoreSaltFunc = c.tokenStore.Salt if !c.IsDRSecondary() { - c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, cubbyholeMountPath).(*CubbyholeBackend) + c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, mountPathCubbyhole).(*CubbyholeBackend) } } @@ -1048,7 +1048,7 @@ func (c *Core) defaultAuthTable() *MountTable { tokenAuth := &MountEntry{ Table: credentialTableType, Path: "token/", - Type: "token", + Type: mountTypeToken, Description: "token based credentials", UUID: tokenUUID, Accessor: tokenAccessor, diff --git a/vault/core.go b/vault/core.go index 270b87866..50d7641a9 100644 --- a/vault/core.go +++ b/vault/core.go @@ -125,6 +125,15 @@ const ( // undoLogsAreSafeStoragePath is a storage path that we write once we know undo logs are // safe, so we don't have to keep checking all the time. undoLogsAreSafeStoragePath = "core/raft/undo_logs_are_safe" + + ErrMlockFailedTemplate = "Failed to lock memory: %v\n\n" + + "This usually means that the mlock syscall is not available.\n" + + "Vault uses mlock to prevent memory from being swapped to\n" + + "disk. This requires root privileges as well as a machine\n" + + "that supports mlock. Please enable mlock on your system or\n" + + "disable Vault from using it. To disable Vault from using it,\n" + + "set the `disable_mlock` configuration option in your configuration\n" + + "file." 
) var ( @@ -1126,30 +1135,27 @@ func CreateCore(conf *CoreConfig) (*Core, error) { return c, nil } -// NewCore is used to construct a new core +// NewCore creates, initializes and configures a Vault node (core). func NewCore(conf *CoreConfig) (*Core, error) { - var err error + // NOTE: The order of configuration of the core has some importance, as we can + // make use of an early return if we are running this new core in recovery mode. c, err := CreateCore(conf) if err != nil { return nil, err } - if err = coreInit(c, conf); err != nil { + + err = coreInit(c, conf) + if err != nil { return nil, err } - if !conf.DisableMlock { - // Ensure our memory usage is locked into physical RAM - if err := mlock.LockMemory(); err != nil { - return nil, fmt.Errorf( - "Failed to lock memory: %v\n\n"+ - "This usually means that the mlock syscall is not available.\n"+ - "Vault uses mlock to prevent memory from being swapped to\n"+ - "disk. This requires root privileges as well as a machine\n"+ - "that supports mlock. Please enable mlock on your system or\n"+ - "disable Vault from using it. To disable Vault from using it,\n"+ - "set the `disable_mlock` configuration option in your configuration\n"+ - "file.", - err) + switch { + case conf.DisableMlock: + // User configured that memory lock should be disabled on unix systems. + default: + err = mlock.LockMemory() + if err != nil { + return nil, fmt.Errorf(ErrMlockFailedTemplate, err) } } @@ -1159,9 +1165,11 @@ func NewCore(conf *CoreConfig) (*Core, error) { return nil, fmt.Errorf("barrier setup failed: %w", err) } - if err := storedLicenseCheck(c, conf); err != nil { + err = storedLicenseCheck(c, conf) + if err != nil { return nil, err } + // We create the funcs here, then populate the given config with it so that // the caller can share state conf.ReloadFuncsLock = &c.reloadFuncsLock @@ -1171,12 +1179,12 @@ func NewCore(conf *CoreConfig) (*Core, error) { conf.ReloadFuncs = &c.reloadFuncs c.rollbackPeriod = conf.RollbackPeriod - if conf.RollbackPeriod == 0 { - c.rollbackPeriod = time.Minute + if c.rollbackPeriod == 0 { + // Default to 1 minute + c.rollbackPeriod = 1 * time.Minute } - // All the things happening below this are not required in - // recovery mode + // For recovery mode we've now configured enough to return early. 
if c.recoveryMode { return c, nil } @@ -1195,81 +1203,39 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.pluginFilePermissions = conf.PluginFilePermissions } - createSecondaries(c, conf) + // Create secondaries (this will only impact Enterprise versions of Vault) + c.createSecondaries(conf.Logger) if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() { c.ha = conf.HAPhysical } + // MFA method c.loginMFABackend = NewLoginMFABackend(c, conf.Logger) - logicalBackends := make(map[string]logical.Factory) - for k, f := range conf.LogicalBackends { - logicalBackends[k] = f - } - _, ok := logicalBackends["kv"] - if !ok { - logicalBackends["kv"] = PassthroughBackendFactory - } + // Logical backends + c.configureLogicalBackends(conf.LogicalBackends, conf.Logger, conf.AdministrativeNamespacePath) - logicalBackends["cubbyhole"] = CubbyholeBackendFactory - logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - sysBackendLogger := conf.Logger.Named("system") - b := NewSystemBackend(c, sysBackendLogger) - if err := b.Setup(ctx, config); err != nil { - return nil, err - } - return b, nil - } - logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - identityLogger := conf.Logger.Named("identity") - return NewIdentityStore(ctx, c, config, identityLogger) - } - addExtraLogicalBackends(c, logicalBackends, conf.AdministrativeNamespacePath) - c.logicalBackends = logicalBackends + // Credentials backends + c.configureCredentialsBackends(conf.CredentialBackends, conf.Logger) - credentialBackends := make(map[string]logical.Factory) - for k, f := range conf.CredentialBackends { - credentialBackends[k] = f - } - credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - tsLogger := conf.Logger.Named("token") - return NewTokenStore(ctx, tsLogger, c, config) - } - addExtraCredentialBackends(c, credentialBackends) - c.credentialBackends = credentialBackends - - auditBackends := make(map[string]audit.Factory) - for k, f := range conf.AuditBackends { - auditBackends[k] = f - } - c.auditBackends = auditBackends + // Audit backends + c.configureAuditBackends(conf.AuditBackends) + // UI uiStoragePrefix := systemBarrierPrefix + "ui" c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix)) - c.clusterListener.Store((*cluster.Listener)(nil)) - - // for listeners with custom response headers, configuring customListenerHeader - if conf.RawConfig.Listeners != nil { - uiHeaders, err := c.UIHeaders() - if err != nil { - return nil, err - } - c.customListenerHeader.Store(NewListenerCustomHeader(conf.RawConfig.Listeners, c.logger, uiHeaders)) - } else { - c.customListenerHeader.Store(([]*ListenerCustomHeaders)(nil)) + // Listeners + err = c.configureListeners(conf) + if err != nil { + return nil, err } - logRequestsLevel := conf.RawConfig.LogRequestsLevel - c.logRequestsLevel = uberAtomic.NewInt32(0) - switch { - case log.LevelFromString(logRequestsLevel) > log.NoLevel && log.LevelFromString(logRequestsLevel) < log.Off: - c.logRequestsLevel.Store(int32(log.LevelFromString(logRequestsLevel))) - case logRequestsLevel != "": - c.logger.Warn("invalid log_requests_level", "level", conf.RawConfig.LogRequestsLevel) - } + // Log level + c.configureLogRequestLevel(conf.RawConfig.LogRequestsLevel) + // Quotas quotasLogger := conf.Logger.Named("quotas") c.quotaManager, err =
quotas.NewManager(quotasLogger, c.quotaLeaseWalker, c.metricSink) if err != nil { @@ -1281,14 +1247,14 @@ func NewCore(conf *CoreConfig) (*Core, error) { return nil, err } + // Version history if c.versionHistory == nil { c.logger.Info("Initializing version history cache for core") c.versionHistory = make(map[string]VaultVersion) } - // start the event system - eventsLogger := conf.Logger.Named("events") - events, err := eventbus.NewEventBus(eventsLogger) + // Events + events, err := eventbus.NewEventBus(conf.Logger.Named("events")) if err != nil { return nil, err } @@ -1301,9 +1267,110 @@ func NewCore(conf *CoreConfig) (*Core, error) { // yet registered core to the server command's SubloggerAdder, so any new // subloggers will be in conf.AllLoggers. c.allLoggers = conf.AllLoggers + return c, nil } +// configureListeners configures the Core with the listeners from the CoreConfig. +func (c *Core) configureListeners(conf *CoreConfig) error { + c.clusterListener.Store((*cluster.Listener)(nil)) + + if conf.RawConfig.Listeners == nil { + c.customListenerHeader.Store(([]*ListenerCustomHeaders)(nil)) + return nil + } + + uiHeaders, err := c.UIHeaders() + if err != nil { + return err + } + + c.customListenerHeader.Store(NewListenerCustomHeader(conf.RawConfig.Listeners, c.logger, uiHeaders)) + + return nil +} + +// configureLogRequestLevel configures the Core with the supplied log level. +func (c *Core) configureLogRequestLevel(level string) { + c.logRequestsLevel = uberAtomic.NewInt32(0) + + lvl := log.LevelFromString(level) + + switch { + case lvl > log.NoLevel && lvl < log.Off: + c.logRequestsLevel.Store(int32(lvl)) + case level != "": + c.logger.Warn("invalid log_requests_level", "level", level) + } +} + +// configureAuditBackends configures the Core with the ability to create audit +// backends for various types. +func (c *Core) configureAuditBackends(backends map[string]audit.Factory) { + auditBackends := make(map[string]audit.Factory, len(backends)) + + for k, f := range backends { + auditBackends[k] = f + } + + c.auditBackends = auditBackends +} + +// configureCredentialsBackends configures the Core with the ability to create +// credential backends for various types. +func (c *Core) configureCredentialsBackends(backends map[string]logical.Factory, logger log.Logger) { + credentialBackends := make(map[string]logical.Factory, len(backends)) + + for k, f := range backends { + credentialBackends[k] = f + } + + credentialBackends[mountTypeToken] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + return NewTokenStore(ctx, logger.Named("token"), c, config) + } + + c.credentialBackends = credentialBackends + + c.addExtraCredentialBackends() +} + +// configureLogicalBackends configures the Core with the ability to create +// logical backends for various types. 
+func (c *Core) configureLogicalBackends(backends map[string]logical.Factory, logger log.Logger, adminNamespacePath string) { + logicalBackends := make(map[string]logical.Factory, len(backends)) + + for k, f := range backends { + logicalBackends[k] = f + } + + // KV + _, ok := logicalBackends[mountTypeKV] + if !ok { + logicalBackends[mountTypeKV] = PassthroughBackendFactory + } + + // Cubbyhole + logicalBackends[mountTypeCubbyhole] = CubbyholeBackendFactory + + // System + logicalBackends[mountTypeSystem] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := NewSystemBackend(c, logger.Named("system")) + if err := b.Setup(ctx, config); err != nil { + return nil, err + } + return b, nil + } + + // Identity + logicalBackends[mountTypeIdentity] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + return NewIdentityStore(ctx, c, config, logger.Named("identity")) + } + + c.logicalBackends = logicalBackends + + c.addExtraLogicalBackends(adminNamespacePath) +} + // handleVersionTimeStamps stores the current version at the current time to // storage, and then loads all versions and upgrade timestamps out from storage. func (c *Core) handleVersionTimeStamps(ctx context.Context) error { diff --git a/vault/core_test.go b/vault/core_test.go index 59e0706f1..e3fad5409 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -13,6 +13,19 @@ import ( "testing" "time" + "github.com/hashicorp/vault/command/server" + + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + + "github.com/hashicorp/vault/builtin/plugin" + + "github.com/hashicorp/vault/builtin/audit/syslog" + + "github.com/hashicorp/vault/builtin/audit/file" + "github.com/hashicorp/vault/builtin/audit/socket" + "github.com/stretchr/testify/require" + "github.com/go-test/deep" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" @@ -35,6 +48,297 @@ import ( // invalidKey is used to test Unseal var invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17] +// TestNewCore_configureAuditBackends ensures that we are able to configure the +// supplied audit backends when getting a NewCore. +func TestNewCore_configureAuditBackends(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + backends map[string]audit.Factory + }{ + "none": { + backends: nil, + }, + "file": { + backends: map[string]audit.Factory{ + "file": file.Factory, + }, + }, + "socket": { + backends: map[string]audit.Factory{ + "socket": socket.Factory, + }, + }, + "syslog": { + backends: map[string]audit.Factory{ + "syslog": syslog.Factory, + }, + }, + "all": { + backends: map[string]audit.Factory{ + "file": file.Factory, + "socket": socket.Factory, + "syslog": syslog.Factory, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + core := &Core{} + require.Len(t, core.auditBackends, 0) + core.configureAuditBackends(tc.backends) + require.Len(t, core.auditBackends, len(tc.backends)) + for k := range tc.backends { + require.Contains(t, core.auditBackends, k) + } + }) + } +} + +// TestNewCore_configureCredentialsBackends ensures that we are able to configure the +// supplied credential backends, in addition to defaults, when getting a NewCore. 
+func TestNewCore_configureCredentialsBackends(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + backends map[string]logical.Factory + }{ + "none": { + backends: nil, + }, + "plugin": { + backends: map[string]logical.Factory{ + "plugin": plugin.Factory, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + core := &Core{} + require.Len(t, core.credentialBackends, 0) + core.configureCredentialsBackends(tc.backends, corehelpers.NewTestLogger(t)) + require.GreaterOrEqual(t, len(core.credentialBackends), len(tc.backends)+1) // token + ent + for k := range tc.backends { + require.Contains(t, core.credentialBackends, k) + } + }) + } +} + +// TestNewCore_configureLogicalBackends ensures that we are able to configure the +// supplied logical backends, in addition to defaults, when getting a NewCore. +func TestNewCore_configureLogicalBackends(t *testing.T) { + t.Parallel() + + // configureLogicalBackends will add some default backends for us: + // cubbyhole + // identity + // kv + // system + // In addition Enterprise versions of Vault may add additional engines. + + tests := map[string]struct { + backends map[string]logical.Factory + adminNamespacePath string + expectedNonEntBackends int + }{ + "none": { + backends: nil, + expectedNonEntBackends: 0, + }, + "database": { + backends: map[string]logical.Factory{ + "database": logicalDb.Factory, + }, + adminNamespacePath: "foo", + expectedNonEntBackends: 5, // database + defaults + }, + "kv": { + backends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + adminNamespacePath: "foo", + expectedNonEntBackends: 4, // kv + defaults (kv is a default) + }, + "plugin": { + backends: map[string]logical.Factory{ + "plugin": plugin.Factory, + }, + adminNamespacePath: "foo", + expectedNonEntBackends: 5, // plugin + defaults + }, + "all": { + backends: map[string]logical.Factory{ + "database": logicalDb.Factory, + "kv": logicalKv.Factory, + "plugin": plugin.Factory, + }, + adminNamespacePath: "foo", + expectedNonEntBackends: 6, // database, plugin + defaults + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + core := &Core{} + require.Len(t, core.logicalBackends, 0) + core.configureLogicalBackends(tc.backends, corehelpers.NewTestLogger(t), tc.adminNamespacePath) + require.GreaterOrEqual(t, len(core.logicalBackends), tc.expectedNonEntBackends) + require.Contains(t, core.logicalBackends, mountTypeKV) + require.Contains(t, core.logicalBackends, mountTypeCubbyhole) + require.Contains(t, core.logicalBackends, mountTypeSystem) + require.Contains(t, core.logicalBackends, mountTypeIdentity) + for k := range tc.backends { + require.Contains(t, core.logicalBackends, k) + } + }) + } +} + +// TestNewCore_configureLogRequestLevel ensures that we are able to configure the +// supplied logging level when getting a NewCore. 
+func TestNewCore_configureLogRequestLevel(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + level string + expectedLevel log.Level + }{ + "none": { + level: "", + expectedLevel: log.NoLevel, + }, + "trace": { + level: "trace", + expectedLevel: log.Trace, + }, + "debug": { + level: "debug", + expectedLevel: log.Debug, + }, + "info": { + level: "info", + expectedLevel: log.Info, + }, + "warn": { + level: "warn", + expectedLevel: log.Warn, + }, + "error": { + level: "error", + expectedLevel: log.Error, + }, + "bad": { + level: "foo", + expectedLevel: log.NoLevel, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // We need to supply a logger, as configureLogRequestLevel emits + // warnings to the logs in certain circumstances. + core := &Core{ + logger: corehelpers.NewTestLogger(t), + } + core.configureLogRequestLevel(tc.level) + require.Equal(t, tc.expectedLevel, log.Level(core.logRequestsLevel.Load())) + }) + } +} + +// TestNewCore_configureListeners tests that we are able to configure listeners +// on a NewCore via config. +func TestNewCore_configureListeners(t *testing.T) { + // We would usually expect CoreConfig to come from server.NewConfig(). + // However, we want to fiddle to give us some granular control over the config. + tests := map[string]struct { + config *CoreConfig + expectedListeners []*ListenerCustomHeaders + }{ + "nil-listeners": { + config: &CoreConfig{ + RawConfig: &server.Config{ + SharedConfig: &configutil.SharedConfig{}, + }, + }, + expectedListeners: nil, + }, + "listeners-empty": { + config: &CoreConfig{ + RawConfig: &server.Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{}, + }, + }, + }, + expectedListeners: nil, + }, + "listeners-some": { + config: &CoreConfig{ + RawConfig: &server.Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + {Address: "foo"}, + }, + }, + }, + }, + expectedListeners: []*ListenerCustomHeaders{ + {Address: "foo"}, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // We need to init some values ourselves, usually CreateCore does this for us. 
+ logger := corehelpers.NewTestLogger(t) + backend, err := inmem.NewInmem(nil, logger) + require.NoError(t, err) + storage := &logical.InmemStorage{} + core := &Core{ + clusterListener: new(atomic.Value), + customListenerHeader: new(atomic.Value), + uiConfig: NewUIConfig(false, backend, storage), + } + + err = core.configureListeners(tc.config) + require.NoError(t, err) + switch tc.expectedListeners { + case nil: + require.Nil(t, core.customListenerHeader.Load()) + default: + for i, v := range core.customListenerHeader.Load().([]*ListenerCustomHeaders) { + require.Equal(t, v.Address, tc.config.RawConfig.Listeners[i].Address) + } + } + }) + } +} + func TestNewCore_badRedirectAddr(t *testing.T) { logger = logging.NewVaultLogger(log.Trace) diff --git a/vault/core_util.go b/vault/core_util.go index 78fc8f829..6298c24f9 100644 --- a/vault/core_util.go +++ b/vault/core_util.go @@ -78,11 +78,11 @@ func (c *Core) EnableUndoLogs() {} func (c *Core) PersistUndoLogs() error { return nil } func (c *Core) teardownReplicationResolverHandler() {} -func createSecondaries(*Core, *CoreConfig) {} +func (c *Core) createSecondaries(_ hclog.Logger) {} -func addExtraLogicalBackends(*Core, map[string]logical.Factory, string) {} +func (c *Core) addExtraLogicalBackends(_ string) {} -func addExtraCredentialBackends(*Core, map[string]logical.Factory) {} +func (c *Core) addExtraCredentialBackends() {} func preUnsealInternal(context.Context, *Core) error { return nil } diff --git a/vault/logical_system.go b/vault/logical_system.go index 02be0a986..0542462ac 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -2032,7 +2032,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, if !strings.HasPrefix(path, "auth/") { return logical.ErrorResponse(fmt.Sprintf("'token_type' can only be modified on auth mounts")), logical.ErrInvalidRequest } - if mountEntry.Type == "token" || mountEntry.Type == "ns_token" { + if mountEntry.Type == mountTypeToken || mountEntry.Type == mountTypeNSToken { return logical.ErrorResponse(fmt.Sprintf("'token_type' cannot be set for 'token' or 'ns_token' auth mounts")), logical.ErrInvalidRequest } diff --git a/vault/mount.go b/vault/mount.go index 29e179ae8..890f5a935 100644 --- a/vault/mount.go +++ b/vault/mount.go @@ -62,15 +62,19 @@ const ( // ListingVisibilityUnauth is the unauth type for listing visibility ListingVisibilityUnauth ListingVisibilityType = "unauth" - systemMountPath = "sys/" - identityMountPath = "identity/" - cubbyholeMountPath = "cubbyhole/" + mountPathSystem = "sys/" + mountPathIdentity = "identity/" + mountPathCubbyhole = "cubbyhole/" - systemMountType = "system" - identityMountType = "identity" - cubbyholeMountType = "cubbyhole" - pluginMountType = "plugin" + mountTypeSystem = "system" + mountTypeNSSystem = "ns_system" + mountTypeIdentity = "identity" + mountTypeCubbyhole = "cubbyhole" + mountTypePlugin = "plugin" + mountTypeKV = "kv" mountTypeNSCubbyhole = "ns_cubbyhole" + mountTypeToken = "token" + mountTypeNSToken = "ns_token" MountTableUpdateStorage = true MountTableNoUpdateStorage = false @@ -91,25 +95,25 @@ var ( protectedMounts = []string{ "audit/", "auth/", - systemMountPath, - cubbyholeMountPath, - identityMountPath, + mountPathSystem, + mountPathCubbyhole, + mountPathIdentity, } untunableMounts = []string{ - cubbyholeMountPath, - systemMountPath, + mountPathCubbyhole, + mountPathSystem, "audit/", - identityMountPath, + mountPathIdentity, } // singletonMounts can only exist in one location and are // loaded by default. 
These are types, not paths. singletonMounts = []string{ - cubbyholeMountType, - systemMountType, - "token", - identityMountType, + mountTypeCubbyhole, + mountTypeSystem, + mountTypeToken, + mountTypeIdentity, } // mountAliases maps old backend names to new backend names, allowing us @@ -430,7 +434,7 @@ func (e *MountEntry) IsExternalPlugin() bool { // MountClass returns the mount class based on Accessor and Path func (e *MountEntry) MountClass() string { - if e.Accessor == "" || strings.HasPrefix(e.Path, fmt.Sprintf("%s/", systemMountPath)) { + if e.Accessor == "" || strings.HasPrefix(e.Path, fmt.Sprintf("%s/", mountPathSystem)) { return "" } @@ -694,7 +698,7 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStora // Check for the correct backend type backendType := backend.Type() if backendType != logical.TypeLogical { - if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { + if entry.Type != mountTypeKV && entry.Type != mountTypeSystem && entry.Type != mountTypeCubbyhole { return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) } } @@ -1336,7 +1340,7 @@ func (c *Core) runMountUpdates(ctx context.Context, needPersist bool) error { entry.Local = true needPersist = true } - if entry.Type == cubbyholeMountType && !entry.Local { + if entry.Type == mountTypeCubbyhole && !entry.Local { entry.Local = true needPersist = true } @@ -1563,7 +1567,7 @@ func (c *Core) setupMounts(ctx context.Context) error { backendType := backend.Type() if backendType != logical.TypeLogical { - if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { + if entry.Type != mountTypeKV && entry.Type != mountTypeSystem && entry.Type != mountTypeCubbyhole { return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) } } @@ -1692,7 +1696,7 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView } switch { - case entry.Type == "plugin": + case entry.Type == mountTypePlugin: conf["plugin_name"] = entry.Config.PluginName default: conf["plugin_name"] = t @@ -1750,7 +1754,7 @@ func (c *Core) defaultMountTable() *MountTable { if err != nil { panic(fmt.Sprintf("could not create default secret mount UUID: %v", err)) } - mountAccessor, err := c.generateMountAccessor("kv") + mountAccessor, err := c.generateMountAccessor(mountTypeKV) if err != nil { panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err)) } @@ -1762,7 +1766,7 @@ func (c *Core) defaultMountTable() *MountTable { kvMount := &MountEntry{ Table: mountTableType, Path: "secret/", - Type: "kv", + Type: mountTypeKV, Description: "key/value secret storage", UUID: mountUUID, Accessor: mountAccessor, @@ -1798,8 +1802,8 @@ func (c *Core) requiredMountTable() *MountTable { } cubbyholeMount := &MountEntry{ Table: mountTableType, - Path: cubbyholeMountPath, - Type: cubbyholeMountType, + Path: mountPathCubbyhole, + Type: mountTypeCubbyhole, Description: "per-token private secret storage", UUID: cubbyholeUUID, Accessor: cubbyholeAccessor, @@ -1823,7 +1827,7 @@ func (c *Core) requiredMountTable() *MountTable { sysMount := &MountEntry{ Table: mountTableType, Path: "sys/", - Type: systemMountType, + Type: mountTypeSystem, Description: "system endpoints used for control, policy and debugging", UUID: sysUUID, Accessor: sysAccessor, @@ -1899,15 +1903,15 @@ func (c *Core) singletonMountTables() (mounts, auth *MountTable) { func (c *Core) setCoreBackend(entry *MountEntry, backend logical.Backend, view *BarrierView) { switch entry.Type { - case systemMountType: + case 
mountTypeSystem: c.systemBackend = backend.(*SystemBackend) c.systemBarrierView = view - case cubbyholeMountType: + case mountTypeCubbyhole: ch := backend.(*CubbyholeBackend) ch.saltUUID = entry.UUID ch.storageView = view c.cubbyholeBackend = ch - case identityMountType: + case mountTypeIdentity: c.identityStore = backend.(*IdentityStore) } } diff --git a/vault/mount_util.go b/vault/mount_util.go index 13f141817..d1431f12d 100644 --- a/vault/mount_util.go +++ b/vault/mount_util.go @@ -33,7 +33,7 @@ func runFilteredPathsEvaluation(context.Context, *Core, bool) error { // ViewPath returns storage prefix for the view func (e *MountEntry) ViewPath() string { switch e.Type { - case systemMountType: + case mountTypeSystem: return systemBarrierPrefix case "token": return path.Join(systemBarrierPrefix, tokenSubPath) + "/" diff --git a/vault/plugin_reload.go b/vault/plugin_reload.go index 79054cf8a..e1c946703 100644 --- a/vault/plugin_reload.go +++ b/vault/plugin_reload.go @@ -41,9 +41,9 @@ func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) // - auth/foo if strings.HasPrefix(mount, credentialRoutePrefix) { isAuth = true - } else if strings.HasPrefix(mount, systemMountPath+credentialRoutePrefix) { + } else if strings.HasPrefix(mount, mountPathSystem+credentialRoutePrefix) { isAuth = true - mount = strings.TrimPrefix(mount, systemMountPath) + mount = strings.TrimPrefix(mount, mountPathSystem) } if !strings.HasSuffix(mount, "/") { mount += "/" diff --git a/vault/router.go b/vault/router.go index 3497e8244..25e738e38 100644 --- a/vault/router.go +++ b/vault/router.go @@ -637,9 +637,9 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc clientToken := req.ClientToken switch { case strings.HasPrefix(originalPath, "auth/token/"): - case strings.HasPrefix(originalPath, "sys/"): - case strings.HasPrefix(originalPath, "identity/"): - case strings.HasPrefix(originalPath, cubbyholeMountPath): + case strings.HasPrefix(originalPath, mountPathSystem): + case strings.HasPrefix(originalPath, mountPathIdentity): + case strings.HasPrefix(originalPath, mountPathCubbyhole): if req.Operation == logical.RollbackOperation { // Backend doesn't support this and it can't properly look up a // cubbyhole ID so just return here @@ -809,7 +809,7 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc } switch re.mountEntry.Type { - case "token", "ns_token": + case mountTypeToken, mountTypeNSToken: // Nothing; we respect what the token store is telling us and // we don't allow tuning default: diff --git a/vault/token_store.go b/vault/token_store.go index 882206fa5..f5c3b122f 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -113,7 +113,7 @@ var ( return errors.New("nil token entry") } - storage := ts.core.router.MatchingStorageByAPIPath(ctx, cubbyholeMountPath) + storage := ts.core.router.MatchingStorageByAPIPath(ctx, mountPathCubbyhole) if storage == nil { return fmt.Errorf("no cubby mount entry") } @@ -2201,7 +2201,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data } // List all the cubbyhole storage keys - view := ts.core.router.MatchingStorageByAPIPath(ctx, cubbyholeMountPath) + view := ts.core.router.MatchingStorageByAPIPath(ctx, mountPathCubbyhole) if view == nil { return fmt.Errorf("no cubby mount entry") } diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 5495cdae3..eec1a9a33 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -135,8 +135,8 @@ 
func TestTokenStore_CubbyholeTidy(t *testing.T) { func testTokenStore_CubbyholeTidy(t *testing.T, c *Core, root string, nsCtx context.Context) { ts := c.tokenStore - backend := c.router.MatchingBackend(nsCtx, cubbyholeMountPath) - view := c.router.MatchingStorageByAPIPath(nsCtx, cubbyholeMountPath) + backend := c.router.MatchingBackend(nsCtx, mountPathCubbyhole) + view := c.router.MatchingStorageByAPIPath(nsCtx, mountPathCubbyhole) for i := 1; i <= 20; i++ { // Create 20 tokens
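A side effect of this refactor, visible in the new core_test.go tests above, is that each configure* helper can be driven against a bare Core value without running the rest of NewCore. A minimal sketch of that pattern, assuming the vault package internals shown in this patch (Core, configureAuditBackends, the builtin file audit factory) and a hypothetical test name:

    func TestCore_configureAuditBackends_sketch(t *testing.T) {
        // Hypothetical example; mirrors TestNewCore_configureAuditBackends above.
        core := &Core{}

        // Register only the "file" audit factory. No storage, seal, or listener
        // setup is needed because configureAuditBackends simply copies the
        // supplied factory map onto the Core.
        core.configureAuditBackends(map[string]audit.Factory{
            "file": file.Factory,
        })

        require.Contains(t, core.auditBackends, "file")
    }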