diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go
index 66d666a5a..ca0309e8d 100644
--- a/builtin/logical/pki/ca_test.go
+++ b/builtin/logical/pki/ca_test.go
@@ -82,7 +82,7 @@ func TestBackend_CA_Steps(t *testing.T) {
 		NotBefore:             time.Now().Add(-30 * time.Second),
 		NotAfter:              time.Now().Add(262980 * time.Hour),
 		BasicConstraintsValid: true,
-		IsCA: true,
+		IsCA:                  true,
 	}
 	caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak)
 	if err != nil {
diff --git a/command/base.go b/command/base.go
index ae340c504..d71c4a798 100644
--- a/command/base.go
+++ b/command/base.go
@@ -24,8 +24,8 @@ const (
 	// maxLineLength is the maximum width of any line.
 	maxLineLength int = 78
 
-	// notSetNamespace is a flag value for a not-set namespace
-	notSetNamespace = "(not set)"
+	// notSetValue is a flag value for a not-set value
+	notSetValue = "(not set)"
 )
 
 // reRemoveWhitespace is a regular expression for stripping whitespace from
@@ -129,10 +129,10 @@ func (c *BaseCommand) Client() (*api.Client, error) {
 
 	// flagNS takes precedence over flagNamespace. After resolution, point both
 	// flags to the same value to be able to use them interchangeably anywhere.
-	if c.flagNS != notSetNamespace {
+	if c.flagNS != notSetValue {
 		c.flagNamespace = c.flagNS
 	}
-	if c.flagNamespace != notSetNamespace {
+	if c.flagNamespace != notSetValue {
 		client.SetNamespace(namespace.Canonicalize(c.flagNamespace))
 	}
 
@@ -256,7 +256,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
 		f.StringVar(&StringVar{
 			Name: "namespace",
 			Target: &c.flagNamespace,
-			Default: notSetNamespace, // this can never be a real value
+			Default: notSetValue, // this can never be a real value
 			EnvVar: "VAULT_NAMESPACE",
 			Completion: complete.PredictAnything,
 			Usage: "The namespace to use for the command. Setting this is not " +
@@ -267,7 +267,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
 		f.StringVar(&StringVar{
 			Name: "ns",
 			Target: &c.flagNS,
-			Default: notSetNamespace, // this can never be a real value
+			Default: notSetValue, // this can never be a real value
 			Completion: complete.PredictAnything,
 			Hidden: true,
 			Usage: "Alias for -namespace. This takes precedence over -namespace.",
diff --git a/command/server.go b/command/server.go
index 245f28864..84194d348 100644
--- a/command/server.go
+++ b/command/server.go
@@ -152,7 +152,7 @@ func (c *ServerCommand) Flags() *FlagSets {
 	f.StringVar(&StringVar{
 		Name: "log-level",
 		Target: &c.flagLogLevel,
-		Default: "info",
+		Default: notSetValue,
 		EnvVar: "VAULT_LOG_LEVEL",
 		Completion: complete.PredictSet("trace", "debug", "info", "warn", "err"),
 		Usage: "Log verbosity level. Supported values (in order of detail) are " +
@@ -307,8 +307,12 @@ func (c *ServerCommand) Run(args []string) int {
 		c.logWriter = os.Stdout
 	}
 	var level log.Level
+	var logLevelWasNotSet bool
 	c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel))
 	switch c.flagLogLevel {
+	case notSetValue:
+		logLevelWasNotSet = true
+		level = log.Info
 	case "trace":
 		level = log.Trace
 	case "debug":
@@ -334,10 +338,7 @@ func (c *ServerCommand) Run(args []string) int {
 		c.logger = logging.NewVaultLoggerWithWriter(c.logWriter, level)
 	}
 
-	grpclog.SetLogger(&grpclogFaker{
-		logger: c.logger.Named("grpclogfaker"),
-		log: os.Getenv("VAULT_GRPC_LOGGING") != "",
-	})
+	allLoggers := []log.Logger{c.logger}
 
 	// Automatically enable dev mode if other dev flags are provided.
 	if c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster {
@@ -390,6 +391,32 @@ func (c *ServerCommand) Run(args []string) int {
 		return 1
 	}
 
+	if config.LogLevel != "" && logLevelWasNotSet {
+		configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
+		switch configLogLevel {
+		case "trace":
+			c.logger.SetLevel(log.Trace)
+		case "debug":
+			c.logger.SetLevel(log.Debug)
+		case "notice", "info", "":
+			c.logger.SetLevel(log.Info)
+		case "warn", "warning":
+			c.logger.SetLevel(log.Warn)
+		case "err", "error":
+			c.logger.SetLevel(log.Error)
+		default:
+			c.UI.Error(fmt.Sprintf("Unknown log level: %s", config.LogLevel))
+			return 1
+		}
+	}
+
+	namedGRPCLogFaker := c.logger.Named("grpclogfaker")
+	allLoggers = append(allLoggers, namedGRPCLogFaker)
+	grpclog.SetLogger(&grpclogFaker{
+		logger: namedGRPCLogFaker,
+		log: os.Getenv("VAULT_GRPC_LOGGING") != "",
+	})
+
 	// Ensure that a backend is provided
 	if config.Storage == nil {
 		c.UI.Output("A storage backend must be specified")
@@ -423,7 +450,9 @@ func (c *ServerCommand) Run(args []string) int {
 		c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type))
 		return 1
 	}
-	backend, err := factory(config.Storage.Config, c.logger.Named("storage."+config.Storage.Type))
+	namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
+	allLoggers = append(allLoggers, namedStorageLogger)
+	backend, err := factory(config.Storage.Config, namedStorageLogger)
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err))
 		return 1
@@ -469,6 +498,7 @@ func (c *ServerCommand) Run(args []string) int {
 		PluginDirectory: config.PluginDirectory,
 		EnableUI: config.EnableUI,
 		EnableRaw: config.EnableRawEndpoint,
+		AllLoggers: allLoggers,
 	}
 	if c.flagDev {
 		coreConfig.DevToken = c.flagDevRootTokenID
@@ -1039,6 +1069,51 @@ CLUSTER_SYNTHESIS_COMPLETE:
 		case <-c.SighupCh:
 			c.UI.Output("==> Vault reload triggered")
+
+			// Check for new log level
+			var config *server.Config
+			var level log.Level
+			for _, path := range c.flagConfigs {
+				current, err := server.LoadConfig(path, c.logger)
+				if err != nil {
+					c.logger.Error("could not reload config", "path", path, "error", err)
+					goto RUNRELOADFUNCS
+				}
+
+				if config == nil {
+					config = current
+				} else {
+					config = config.Merge(current)
+				}
+			}
+
+			// Ensure at least one config was found.
+			if config == nil {
+				c.logger.Error("no config found at reload time")
+				goto RUNRELOADFUNCS
+			}
+
+			if config.LogLevel != "" {
+				configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
+				switch configLogLevel {
+				case "trace":
+					level = log.Trace
+				case "debug":
+					level = log.Debug
+				case "notice", "info", "":
+					level = log.Info
+				case "warn", "warning":
+					level = log.Warn
+				case "err", "error":
+					level = log.Error
+				default:
+					c.logger.Error("unknown log level found on reload", "level", config.LogLevel)
+					goto RUNRELOADFUNCS
+				}
+				core.SetLogLevel(level)
+			}
+
+		RUNRELOADFUNCS:
 			if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil {
 				c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
 			}
 
diff --git a/command/server/config.go b/command/server/config.go
index eb7ff3d6d..715120a98 100644
--- a/command/server/config.go
+++ b/command/server/config.go
@@ -53,6 +53,8 @@ type Config struct {
 	PluginDirectory string `hcl:"plugin_directory"`
 
+	LogLevel string `hcl:"log_level"`
+
 	PidFile string `hcl:"pid_file"`
 
 	EnableRawEndpoint bool `hcl:"-"`
 	EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
@@ -299,6 +301,11 @@ func (c *Config) Merge(c2 *Config) *Config {
 		result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration
 	}
 
+	result.LogLevel = c.LogLevel
+	if c2.LogLevel != "" {
+		result.LogLevel = c2.LogLevel
+	}
+
 	result.ClusterName = c.ClusterName
 	if c2.ClusterName != "" {
 		result.ClusterName = c2.ClusterName
diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go
index 67c05b9b1..3518ed8f2 100644
--- a/helper/storagepacker/storagepacker.go
+++ b/helper/storagepacker/storagepacker.go
@@ -347,7 +347,7 @@ func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string
 	packer := &StoragePacker{
 		view: view,
 		viewPrefix: viewPrefix,
-		logger: logger.Named("storagepacker"),
+		logger: logger,
 		storageLocks: locksutil.CreateLocks(),
 	}
 
diff --git a/vault/audit.go b/vault/audit.go
index b7dd8ebf4..491f47196 100644
--- a/vault/audit.go
+++ b/vault/audit.go
@@ -308,7 +308,9 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo
 // setupAudit is invoked after we've loaded the audit able to
 // initialize the audit backends
 func (c *Core) setupAudits(ctx context.Context) error {
-	broker := NewAuditBroker(c.baseLogger.Named("audit"))
+	brokerLogger := c.baseLogger.Named("audit")
+	c.AddLogger(brokerLogger)
+	broker := NewAuditBroker(brokerLogger)
 
 	c.auditLock.Lock()
 	defer c.auditLock.Unlock()
@@ -413,6 +415,7 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
 	}
 
 	auditLogger := c.baseLogger.Named("audit")
+	c.AddLogger(auditLogger)
 
 	switch entry.Type {
 	case "file":
diff --git a/vault/auth.go b/vault/auth.go
index 0b9d052f3..594c46ccb 100644
--- a/vault/auth.go
+++ b/vault/auth.go
@@ -578,9 +578,11 @@ func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysV
 		conf["plugin_name"] = entry.Config.PluginName
 	}
 
+	authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor))
+	c.AddLogger(authLogger)
 	config := &logical.BackendConfig{
 		StorageView: view,
-		Logger: c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor)),
+		Logger: authLogger,
 		Config: conf,
 		System: sysView,
 		BackendUUID: entry.BackendAwareUUID,
diff --git a/vault/core.go b/vault/core.go
index a373bc64d..b6ea19c35 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -367,6 +367,10 @@ type Core struct {
 
 	// Stores any funcs that should be run on successful postUnseal
 	postUnsealFuncs []func()
+
+	// Stores loggers so we can reset the level
+	allLoggers []log.Logger
+	allLoggersLock sync.RWMutex
 }
 
 // CoreConfig is used to parameterize a core
@@ -420,6 +424,8 @@ type CoreConfig struct {
 
 	ReloadFuncs *map[string][]reload.ReloadFunc
 	ReloadFuncsLock *sync.RWMutex
+
+	AllLoggers []log.Logger
 }
 
 // NewCore is used to construct a new core
@@ -488,9 +494,11 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 		activeNodeReplicationState: new(uint32),
 		keepHALockOnStepDown: new(uint32),
 		activeContextCancelFunc: new(atomic.Value),
+		allLoggers: conf.AllLoggers,
 	}
 
 	atomic.StoreUint32(c.sealed, 1)
+	c.allLoggers = append(c.allLoggers, c.logger)
 
 	atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
 	c.localClusterCert.Store(([]byte)(nil))
@@ -520,15 +528,19 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 	c.seal.SetCore(c)
 
-	c.sealUnwrapper = NewSealUnwrapper(phys, c.baseLogger.Named("storage.sealunwrapper"))
+	unwrapperLogger := c.baseLogger.Named("storage.sealunwrapper")
+	c.allLoggers = append(c.allLoggers, unwrapperLogger)
+	c.sealUnwrapper = NewSealUnwrapper(phys, unwrapperLogger)
 
 	var ok bool
 
 	// Wrap the physical backend in a cache layer if enabled
+	cacheLogger := c.baseLogger.Named("storage.cache")
+	c.allLoggers = append(c.allLoggers, cacheLogger)
 	if txnOK {
-		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, c.baseLogger.Named("storage.cache"))
+		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
 	} else {
-		c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, c.baseLogger.Named("storage.cache"))
+		c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
 	}
 	c.physicalCache = c.physical.(physical.ToggleablePurgemonster)
 
@@ -585,7 +597,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 	logicalBackends["cubbyhole"] = CubbyholeBackendFactory
 	logicalBackends["system"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		b := NewSystemBackend(c, conf.Logger.Named("system"))
+		sysBackendLogger := conf.Logger.Named("system")
+		c.AddLogger(sysBackendLogger)
+		b := NewSystemBackend(c, sysBackendLogger)
 		if err := b.Setup(ctx, config); err != nil {
 			return nil, err
 		}
@@ -593,7 +607,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 
 	logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewIdentityStore(ctx, c, config, conf.Logger.Named("identity"))
+		identityLogger := conf.Logger.Named("identity")
+		c.AddLogger(identityLogger)
+		return NewIdentityStore(ctx, c, config, identityLogger)
 	}
 
 	c.logicalBackends = logicalBackends
@@ -603,7 +619,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 		credentialBackends[k] = f
 	}
 	credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewTokenStore(ctx, conf.Logger.Named("token"), c, config)
+		tsLogger := conf.Logger.Named("token")
+		c.AddLogger(tsLogger)
+		return NewTokenStore(ctx, tsLogger, c, config)
 	}
 
 	c.credentialBackends = credentialBackends
@@ -1456,3 +1474,17 @@ func (c *Core) RouterAccess() *RouterAccess {
 func (c *Core) IsDRSecondary() bool {
 	return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
 }
+
+func (c *Core) AddLogger(logger log.Logger) {
+	c.allLoggersLock.Lock()
+	defer c.allLoggersLock.Unlock()
+	c.allLoggers = append(c.allLoggers, logger)
+}
+
+func (c *Core) SetLogLevel(level log.Level) {
+	c.allLoggersLock.RLock()
+	defer c.allLoggersLock.RUnlock()
+	for _, logger := range c.allLoggers {
+		logger.SetLevel(level)
+	}
+}
diff --git a/vault/expiration.go b/vault/expiration.go
index a1420f765..191b1de11 100644
--- a/vault/expiration.go
+++ b/vault/expiration.go
@@ -132,7 +132,9 @@ func (c *Core) setupExpiration() error {
 	view := c.systemBarrierView.SubView(expirationSubPath)
 
 	// Create the manager
-	mgr := NewExpirationManager(c, view, c.baseLogger.Named("expiration"))
+	expLogger := c.baseLogger.Named("expiration")
+	c.AddLogger(expLogger)
+	mgr := NewExpirationManager(c, view, expLogger)
 	c.expiration = mgr
 
 	// Link the token store to this
@@ -194,6 +196,7 @@ func (m *ExpirationManager) Tidy() error {
 	var tidyErrors *multierror.Error
 
 	logger := m.logger.Named("tidy")
+	m.core.AddLogger(logger)
 
 	if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
 		logger.Warn("tidy operation on leases is already in progress")
diff --git a/vault/identity_store.go b/vault/identity_store.go
index afd782964..e5860bfb3 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -40,12 +40,16 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo
 		core: core,
 	}
 
-	iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, "")
+	entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
+	core.AddLogger(entitiesPackerLogger)
+	groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
+	core.AddLogger(groupsPackerLogger)
+	iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
 	if err != nil {
 		return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err)
 	}
 
-	iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, groupBucketsPrefix)
+	iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, groupsPackerLogger, groupBucketsPrefix)
 	if err != nil {
 		return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err)
 	}
diff --git a/vault/mount.go b/vault/mount.go
index 6b8e43dbd..2eba6bd6a 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -923,9 +923,11 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView
 		conf["plugin_name"] = entry.Config.PluginName
 	}
 
+	backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor))
+	c.AddLogger(backendLogger)
 	config := &logical.BackendConfig{
 		StorageView: view,
-		Logger: c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor)),
+		Logger: backendLogger,
 		Config: conf,
 		System: sysView,
 		BackendUUID: entry.BackendAwareUUID,
diff --git a/vault/policy_store.go b/vault/policy_store.go
index 9df9c174b..b2d8cea03 100644
--- a/vault/policy_store.go
+++ b/vault/policy_store.go
@@ -204,7 +204,9 @@ func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, syst
 func (c *Core) setupPolicyStore(ctx context.Context) error {
 	// Create the policy store
 	sysView := &dynamicSystemView{core: c}
-	c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, c.baseLogger.Named("policy"))
+	psLogger := c.baseLogger.Named("policy")
+	c.AddLogger(psLogger)
+	c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)
 
 	if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
 		// Policies will sync from the primary
diff --git a/vault/rollback.go b/vault/rollback.go
index 48ab51b6f..7d9d87311 100644
--- a/vault/rollback.go
+++ b/vault/rollback.go
@@ -242,7 +242,9 @@ func (c *Core) startRollback() error {
 		}
 		return ret
 	}
-	c.rollback = NewRollbackManager(c.activeContext, c.baseLogger.Named("rollback"), backendsFunc, c.router, c)
+	rollbackLogger := c.baseLogger.Named("rollback")
+	c.AddLogger(rollbackLogger)
+	c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
 	c.rollback.Start()
 	return nil
 }
diff --git a/vault/token_store.go b/vault/token_store.go
index f10a43e81..77f1ce14c 100644
--- a/vault/token_store.go
+++ b/vault/token_store.go
@@ -135,6 +135,8 @@ type TokenStore struct {
 
 	tidyLock *uint32
 
+	core *Core
+
 	identityPoliciesDeriverFunc func(string) (*identity.Entity, []string, error)
 
 	quitContext context.Context
@@ -148,12 +150,13 @@ func NewTokenStore(ctx context.Context, logger log.Logger, c *Core, config *logi
 
 	// Initialize the store
 	t := &TokenStore{
-		view: view,
-		cubbyholeDestroyer: destroyCubbyhole,
-		logger: logger,
-		tokenLocks: locksutil.CreateLocks(),
-		tokensPendingDeletion: &sync.Map{},
-		saltLock: sync.RWMutex{},
+		view:                  view,
+		cubbyholeDestroyer:    destroyCubbyhole,
+		logger:                logger,
+		tokenLocks:            locksutil.CreateLocks(),
+		tokensPendingDeletion: &sync.Map{},
+		saltLock:              sync.RWMutex{},
+		core:                  c,
 		identityPoliciesDeriverFunc: c.fetchEntityAndDerivedPolicies,
 		tidyLock: new(uint32),
 		quitContext: c.activeContext,
@@ -1309,6 +1312,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data
 	ctx = ts.quitContext
 
 	logger := ts.logger.Named("tidy")
+	ts.core.AddLogger(logger)
 
 	var tidyErrors *multierror.Error
 
diff --git a/website/source/docs/configuration/index.html.md b/website/source/docs/configuration/index.html.md
index c53d4217b..5ad1bce9c 100644
--- a/website/source/docs/configuration/index.html.md
+++ b/website/source/docs/configuration/index.html.md
@@ -105,6 +105,13 @@ to specify where the configuration is.
 - `telemetry` ([Telemetry][telemetry]: <none>) – Specifies the telemetry
   reporting system.
 
+- `log_level` `(string: "")` – Specifies the log level to use; overridden by
+  CLI and env var parameters. On SIGHUP, Vault will update the log level to the
+  current value specified here (including overriding the CLI/env var
+  parameters). Not all parts of Vault's logging can have their level changed
+  dynamically this way; in particular, secrets/auth plugins are currently not
+  updated dynamically.
+
 - `default_lease_ttl` `(string: "768h")` – Specifies the default lease duration
   for tokens and secrets. This is specified using a label suffix like `"30s"` or
   `"1h"`. This value cannot be larger than `max_lease_ttl`.
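
A minimal sketch of how the log_level option introduced above might be used; the file name and the storage and listener values are illustrative placeholders, not part of this change:

    # vault.hcl -- hypothetical example; only log_level comes from this patch
    log_level = "debug"

    storage "file" {
      path = "/tmp/vault-data"   # placeholder path
    }

    listener "tcp" {
      address     = "127.0.0.1:8200"
      tls_disable = 1
    }

Assuming the server was started with something like `vault server -config=vault.hcl`, sending the process SIGHUP (for example `kill -HUP <pid>`) re-reads the config and, via the reload handler added in command/server.go, applies the parsed level through Core.SetLogLevel without a restart. As the documentation hunk notes, the config value overrides the -log-level flag and VAULT_LOG_LEVEL on reload, and secrets/auth plugin loggers are not updated dynamically.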