Allow most parts of Vault's logging to have its level changed on-the-fly (#5280)
* Allow most parts of Vault's logging to have its level changed on-the-fly
* Use a const for not set

parent 0a482e9bd2
commit c28ed23972
@@ -82,7 +82,7 @@ func TestBackend_CA_Steps(t *testing.T) {
 		NotBefore: time.Now().Add(-30 * time.Second),
 		NotAfter: time.Now().Add(262980 * time.Hour),
 		BasicConstraintsValid: true,
 		IsCA: true,
 	}
 	caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak)
 	if err != nil {
@@ -24,8 +24,8 @@ const (
 	// maxLineLength is the maximum width of any line.
 	maxLineLength int = 78

-	// notSetNamespace is a flag value for a not-set namespace
-	notSetNamespace = "(not set)"
+	// notSetValue is a flag value for a not-set value
+	notSetValue = "(not set)"
 )

 // reRemoveWhitespace is a regular expression for stripping whitespace from
@@ -129,10 +129,10 @@ func (c *BaseCommand) Client() (*api.Client, error) {

 	// flagNS takes precedence over flagNamespace. After resolution, point both
 	// flags to the same value to be able to use them interchangeably anywhere.
-	if c.flagNS != notSetNamespace {
+	if c.flagNS != notSetValue {
 		c.flagNamespace = c.flagNS
 	}
-	if c.flagNamespace != notSetNamespace {
+	if c.flagNamespace != notSetValue {
 		client.SetNamespace(namespace.Canonicalize(c.flagNamespace))
 	}
@@ -256,7 +256,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
 	f.StringVar(&StringVar{
 		Name: "namespace",
 		Target: &c.flagNamespace,
-		Default: notSetNamespace, // this can never be a real value
+		Default: notSetValue, // this can never be a real value
 		EnvVar: "VAULT_NAMESPACE",
 		Completion: complete.PredictAnything,
 		Usage: "The namespace to use for the command. Setting this is not " +
|
@@ -267,7 +267,7 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets {
 	f.StringVar(&StringVar{
 		Name: "ns",
 		Target: &c.flagNS,
-		Default: notSetNamespace, // this can never be a real value
+		Default: notSetValue, // this can never be a real value
 		Completion: complete.PredictAnything,
 		Hidden: true,
 		Usage: "Alias for -namespace. This takes precedence over -namespace.",
@@ -152,7 +152,7 @@ func (c *ServerCommand) Flags() *FlagSets {
 	f.StringVar(&StringVar{
 		Name: "log-level",
 		Target: &c.flagLogLevel,
-		Default: "info",
+		Default: notSetValue,
 		EnvVar: "VAULT_LOG_LEVEL",
 		Completion: complete.PredictSet("trace", "debug", "info", "warn", "err"),
 		Usage: "Log verbosity level. Supported values (in order of detail) are " +
@@ -307,8 +307,12 @@ func (c *ServerCommand) Run(args []string) int {
 		c.logWriter = os.Stdout
 	}
 	var level log.Level
+	var logLevelWasNotSet bool
 	c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel))
 	switch c.flagLogLevel {
+	case notSetValue:
+		logLevelWasNotSet = true
+		level = log.Info
 	case "trace":
 		level = log.Trace
 	case "debug":
@@ -334,10 +338,7 @@ func (c *ServerCommand) Run(args []string) int {
 		c.logger = logging.NewVaultLoggerWithWriter(c.logWriter, level)
 	}

-	grpclog.SetLogger(&grpclogFaker{
-		logger: c.logger.Named("grpclogfaker"),
-		log: os.Getenv("VAULT_GRPC_LOGGING") != "",
-	})
+	allLoggers := []log.Logger{c.logger}

 	// Automatically enable dev mode if other dev flags are provided.
 	if c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster {
@@ -390,6 +391,32 @@ func (c *ServerCommand) Run(args []string) int {
 		return 1
 	}

+	if config.LogLevel != "" && logLevelWasNotSet {
+		configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
+		switch configLogLevel {
+		case "trace":
+			c.logger.SetLevel(log.Trace)
+		case "debug":
+			c.logger.SetLevel(log.Debug)
+		case "notice", "info", "":
+			c.logger.SetLevel(log.Info)
+		case "warn", "warning":
+			c.logger.SetLevel(log.Warn)
+		case "err", "error":
+			c.logger.SetLevel(log.Error)
+		default:
+			c.UI.Error(fmt.Sprintf("Unknown log level: %s", config.LogLevel))
+			return 1
+		}
+	}
+
+	namedGRPCLogFaker := c.logger.Named("grpclogfaker")
+	allLoggers = append(allLoggers, namedGRPCLogFaker)
+	grpclog.SetLogger(&grpclogFaker{
+		logger: namedGRPCLogFaker,
+		log: os.Getenv("VAULT_GRPC_LOGGING") != "",
+	})
+
 	// Ensure that a backend is provided
 	if config.Storage == nil {
 		c.UI.Output("A storage backend must be specified")
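Note: the string-to-level mapping in this hunk is repeated later in the SIGHUP reload path. As a standalone, hedged illustration of that mapping, parseLogLevel below is a hypothetical helper (not Vault or go-hclog API), written against the go-hclog levels the patch uses:

package main

import (
	"fmt"
	"strings"

	hclog "github.com/hashicorp/go-hclog"
)

// parseLogLevel mirrors the switch used in the patch: it maps the accepted
// level strings onto go-hclog levels. Illustrative helper only.
func parseLogLevel(s string) (hclog.Level, error) {
	switch strings.ToLower(strings.TrimSpace(s)) {
	case "trace":
		return hclog.Trace, nil
	case "debug":
		return hclog.Debug, nil
	case "notice", "info", "":
		return hclog.Info, nil
	case "warn", "warning":
		return hclog.Warn, nil
	case "err", "error":
		return hclog.Error, nil
	default:
		return hclog.NoLevel, fmt.Errorf("unknown log level: %q", s)
	}
}

func main() {
	level, err := parseLogLevel("WARNING")
	if err != nil {
		panic(err)
	}
	fmt.Println(level) // prints the parsed level
}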
@@ -423,7 +450,9 @@ func (c *ServerCommand) Run(args []string) int {
 		c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type))
 		return 1
 	}
-	backend, err := factory(config.Storage.Config, c.logger.Named("storage."+config.Storage.Type))
+	namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
+	allLoggers = append(allLoggers, namedStorageLogger)
+	backend, err := factory(config.Storage.Config, namedStorageLogger)
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err))
 		return 1
@@ -469,6 +498,7 @@ func (c *ServerCommand) Run(args []string) int {
 		PluginDirectory: config.PluginDirectory,
 		EnableUI: config.EnableUI,
 		EnableRaw: config.EnableRawEndpoint,
+		AllLoggers: allLoggers,
 	}
 	if c.flagDev {
 		coreConfig.DevToken = c.flagDevRootTokenID
@@ -1039,6 +1069,51 @@ CLUSTER_SYNTHESIS_COMPLETE:

 		case <-c.SighupCh:
 			c.UI.Output("==> Vault reload triggered")

+			// Check for new log level
+			var config *server.Config
+			var level log.Level
+			for _, path := range c.flagConfigs {
+				current, err := server.LoadConfig(path, c.logger)
+				if err != nil {
+					c.logger.Error("could not reload config", "path", path, "error", err)
+					goto RUNRELOADFUNCS
+				}
+
+				if config == nil {
+					config = current
+				} else {
+					config = config.Merge(current)
+				}
+			}
+
+			// Ensure at least one config was found.
+			if config == nil {
+				c.logger.Error("no config found at reload time")
+				goto RUNRELOADFUNCS
+			}
+
+			if config.LogLevel != "" {
+				configLogLevel := strings.ToLower(strings.TrimSpace(config.LogLevel))
+				switch configLogLevel {
+				case "trace":
+					level = log.Trace
+				case "debug":
+					level = log.Debug
+				case "notice", "info", "":
+					level = log.Info
+				case "warn", "warning":
+					level = log.Warn
+				case "err", "error":
+					level = log.Error
+				default:
+					c.logger.Error("unknown log level found on reload", "level", config.LogLevel)
+					goto RUNRELOADFUNCS
+				}
+				core.SetLogLevel(level)
+			}
+
+		RUNRELOADFUNCS:
 			if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil {
 				c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
 			}
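Note: the reload path above hangs off Vault's existing SIGHUP handling. A minimal sketch of that trigger in isolation, assuming the go-hclog API the patch uses and hard-coding the new level instead of re-reading config files:

package main

import (
	"os"
	"os/signal"
	"syscall"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// A root logger at Info; in Vault every named subsystem logger is also
	// registered so it can be adjusted on reload.
	logger := hclog.New(&hclog.LoggerOptions{Name: "example", Level: hclog.Info})

	sighup := make(chan os.Signal, 1)
	signal.Notify(sighup, syscall.SIGHUP)

	for range sighup {
		// Vault re-parses its config files here and derives the new level;
		// this sketch simply switches to debug to show the effect of a reload.
		logger.SetLevel(hclog.Debug)
		logger.Debug("log level reloaded")
	}
}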
@@ -53,6 +53,8 @@ type Config struct {

 	PluginDirectory string `hcl:"plugin_directory"`

+	LogLevel string `hcl:"log_level"`
+
 	PidFile string `hcl:"pid_file"`
 	EnableRawEndpoint bool `hcl:"-"`
 	EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
@@ -299,6 +301,11 @@ func (c *Config) Merge(c2 *Config) *Config {
 		result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration
 	}

+	result.LogLevel = c.LogLevel
+	if c2.LogLevel != "" {
+		result.LogLevel = c2.LogLevel
+	}
+
 	result.ClusterName = c.ClusterName
 	if c2.ClusterName != "" {
 		result.ClusterName = c2.ClusterName
@@ -347,7 +347,7 @@ func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string
 	packer := &StoragePacker{
 		view: view,
 		viewPrefix: viewPrefix,
-		logger: logger.Named("storagepacker"),
+		logger: logger,
 		storageLocks: locksutil.CreateLocks(),
 	}
@@ -308,7 +308,9 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo
 // setupAudits is invoked after we've loaded the audit table to
 // initialize the audit backends
 func (c *Core) setupAudits(ctx context.Context) error {
-	broker := NewAuditBroker(c.baseLogger.Named("audit"))
+	brokerLogger := c.baseLogger.Named("audit")
+	c.AddLogger(brokerLogger)
+	broker := NewAuditBroker(brokerLogger)

 	c.auditLock.Lock()
 	defer c.auditLock.Unlock()
@@ -413,6 +415,7 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
 	}

 	auditLogger := c.baseLogger.Named("audit")
+	c.AddLogger(auditLogger)

 	switch entry.Type {
 	case "file":
@@ -578,9 +578,11 @@ func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysV
 		conf["plugin_name"] = entry.Config.PluginName
 	}

+	authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor))
+	c.AddLogger(authLogger)
 	config := &logical.BackendConfig{
 		StorageView: view,
-		Logger: c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor)),
+		Logger: authLogger,
 		Config: conf,
 		System: sysView,
 		BackendUUID: entry.BackendAwareUUID,
@@ -367,6 +367,10 @@ type Core struct {

 	// Stores any funcs that should be run on successful postUnseal
 	postUnsealFuncs []func()

+	// Stores loggers so we can reset the level
+	allLoggers []log.Logger
+	allLoggersLock sync.RWMutex
 }

 // CoreConfig is used to parameterize a core
@@ -420,6 +424,8 @@ type CoreConfig struct {

 	ReloadFuncs *map[string][]reload.ReloadFunc
 	ReloadFuncsLock *sync.RWMutex

+	AllLoggers []log.Logger
+
 }

 // NewCore is used to construct a new core
@@ -488,9 +494,11 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 		activeNodeReplicationState: new(uint32),
 		keepHALockOnStepDown: new(uint32),
 		activeContextCancelFunc: new(atomic.Value),
+		allLoggers: conf.AllLoggers,
 	}

 	atomic.StoreUint32(c.sealed, 1)
+	c.allLoggers = append(c.allLoggers, c.logger)
+
 	atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
 	c.localClusterCert.Store(([]byte)(nil))
@@ -520,15 +528,19 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 	c.seal.SetCore(c)

-	c.sealUnwrapper = NewSealUnwrapper(phys, c.baseLogger.Named("storage.sealunwrapper"))
+	unwrapperLogger := c.baseLogger.Named("storage.sealunwrapper")
+	c.allLoggers = append(c.allLoggers, unwrapperLogger)
+	c.sealUnwrapper = NewSealUnwrapper(phys, unwrapperLogger)

 	var ok bool

 	// Wrap the physical backend in a cache layer if enabled
+	cacheLogger := c.baseLogger.Named("storage.cache")
+	c.allLoggers = append(c.allLoggers, cacheLogger)
 	if txnOK {
-		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, c.baseLogger.Named("storage.cache"))
+		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
 	} else {
-		c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, c.baseLogger.Named("storage.cache"))
+		c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
 	}
 	c.physicalCache = c.physical.(physical.ToggleablePurgemonster)
@@ -585,7 +597,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 	logicalBackends["cubbyhole"] = CubbyholeBackendFactory
 	logicalBackends["system"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		b := NewSystemBackend(c, conf.Logger.Named("system"))
+		sysBackendLogger := conf.Logger.Named("system")
+		c.AddLogger(sysBackendLogger)
+		b := NewSystemBackend(c, sysBackendLogger)
 		if err := b.Setup(ctx, config); err != nil {
 			return nil, err
 		}
@@ -593,7 +607,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}

 	logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewIdentityStore(ctx, c, config, conf.Logger.Named("identity"))
+		identityLogger := conf.Logger.Named("identity")
+		c.AddLogger(identityLogger)
+		return NewIdentityStore(ctx, c, config, identityLogger)
 	}

 	c.logicalBackends = logicalBackends
@@ -603,7 +619,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 		credentialBackends[k] = f
 	}
 	credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewTokenStore(ctx, conf.Logger.Named("token"), c, config)
+		tsLogger := conf.Logger.Named("token")
+		c.AddLogger(tsLogger)
+		return NewTokenStore(ctx, tsLogger, c, config)
 	}
 	c.credentialBackends = credentialBackends
@@ -1456,3 +1474,17 @@ func (c *Core) RouterAccess() *RouterAccess {
 func (c *Core) IsDRSecondary() bool {
 	return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
 }
+
+func (c *Core) AddLogger(logger log.Logger) {
+	c.allLoggersLock.Lock()
+	defer c.allLoggersLock.Unlock()
+	c.allLoggers = append(c.allLoggers, logger)
+}
+
+func (c *Core) SetLogLevel(level log.Level) {
+	c.allLoggersLock.RLock()
+	defer c.allLoggersLock.RUnlock()
+	for _, logger := range c.allLoggers {
+		logger.SetLevel(level)
+	}
+}
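Note: AddLogger and SetLogLevel are the heart of the change: every subsystem logger is registered as it is created, and a level change fans out to all of them. A self-contained sketch of the same pattern, with illustrative names that are not Vault's:

package main

import (
	"sync"

	hclog "github.com/hashicorp/go-hclog"
)

// loggerRegistry is an illustrative stand-in for the bookkeeping Core does
// with allLoggers/allLoggersLock.
type loggerRegistry struct {
	mu      sync.RWMutex
	loggers []hclog.Logger
}

// add records a logger so its level can be reset later.
func (r *loggerRegistry) add(l hclog.Logger) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.loggers = append(r.loggers, l)
}

// setLevel fans a new level out to every registered logger.
func (r *loggerRegistry) setLevel(level hclog.Level) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	for _, l := range r.loggers {
		l.SetLevel(level)
	}
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "core", Level: hclog.Info})
	reg := &loggerRegistry{}
	reg.add(root)

	// Subsystem loggers are derived with Named and registered as they are created.
	storage := root.Named("storage")
	reg.add(storage)

	storage.Debug("not visible yet") // filtered at Info
	reg.setLevel(hclog.Debug)        // e.g. after a SIGHUP re-reads log_level
	storage.Debug("now visible")
}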
@@ -132,7 +132,9 @@ func (c *Core) setupExpiration() error {
 	view := c.systemBarrierView.SubView(expirationSubPath)

 	// Create the manager
-	mgr := NewExpirationManager(c, view, c.baseLogger.Named("expiration"))
+	expLogger := c.baseLogger.Named("expiration")
+	c.AddLogger(expLogger)
+	mgr := NewExpirationManager(c, view, expLogger)
 	c.expiration = mgr

 	// Link the token store to this
@@ -194,6 +196,7 @@ func (m *ExpirationManager) Tidy() error {
 	var tidyErrors *multierror.Error

 	logger := m.logger.Named("tidy")
+	m.core.AddLogger(logger)

 	if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
 		logger.Warn("tidy operation on leases is already in progress")
@@ -40,12 +40,16 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo
 		core: core,
 	}

-	iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, "")
+	entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
+	core.AddLogger(entitiesPackerLogger)
+	groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
+	core.AddLogger(groupsPackerLogger)
+	iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
 	if err != nil {
 		return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err)
 	}

-	iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, groupBucketsPrefix)
+	iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, groupsPackerLogger, groupBucketsPrefix)
 	if err != nil {
 		return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err)
 	}
@@ -923,9 +923,11 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView
 		conf["plugin_name"] = entry.Config.PluginName
 	}

+	backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor))
+	c.AddLogger(backendLogger)
 	config := &logical.BackendConfig{
 		StorageView: view,
-		Logger: c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor)),
+		Logger: backendLogger,
 		Config: conf,
 		System: sysView,
 		BackendUUID: entry.BackendAwareUUID,
@@ -204,7 +204,9 @@ func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, syst
 func (c *Core) setupPolicyStore(ctx context.Context) error {
 	// Create the policy store
 	sysView := &dynamicSystemView{core: c}
-	c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, c.baseLogger.Named("policy"))
+	psLogger := c.baseLogger.Named("policy")
+	c.AddLogger(psLogger)
+	c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)

 	if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
 		// Policies will sync from the primary
@@ -242,7 +242,9 @@ func (c *Core) startRollback() error {
 		}
 		return ret
 	}
-	c.rollback = NewRollbackManager(c.activeContext, c.baseLogger.Named("rollback"), backendsFunc, c.router, c)
+	rollbackLogger := c.baseLogger.Named("rollback")
+	c.AddLogger(rollbackLogger)
+	c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
 	c.rollback.Start()
 	return nil
 }
@@ -135,6 +135,8 @@ type TokenStore struct {

 	tidyLock *uint32

+	core *Core
+
 	identityPoliciesDeriverFunc func(string) (*identity.Entity, []string, error)

 	quitContext context.Context
@@ -148,12 +150,13 @@ func NewTokenStore(ctx context.Context, logger log.Logger, c *Core, config *logi

 	// Initialize the store
 	t := &TokenStore{
 		view: view,
 		cubbyholeDestroyer: destroyCubbyhole,
 		logger: logger,
 		tokenLocks: locksutil.CreateLocks(),
 		tokensPendingDeletion: &sync.Map{},
 		saltLock: sync.RWMutex{},
+		core: c,
 		identityPoliciesDeriverFunc: c.fetchEntityAndDerivedPolicies,
 		tidyLock: new(uint32),
 		quitContext: c.activeContext,
@@ -1309,6 +1312,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data
 	ctx = ts.quitContext

 	logger := ts.logger.Named("tidy")
+	ts.core.AddLogger(logger)

 	var tidyErrors *multierror.Error
@@ -105,6 +105,13 @@ to specify where the configuration is.
 - `telemetry` <tt>([Telemetry][telemetry]: <none>)</tt> – Specifies the telemetry
   reporting system.

+- `log_level` `(string: "")` – Specifies the log level to use; overridden by
+  CLI and env var parameters. On SIGHUP, Vault will update the log level to the
+  current value specified here (including overriding the CLI/env var
+  parameters). Not all parts of Vault's logging can have its level be changed
+  dynamically this way; in particular, secrets/auth plugins are currently not
+  updated dynamically.
+
 - `default_lease_ttl` `(string: "768h")` – Specifies the default lease duration
   for tokens and secrets. This is specified using a label suffix like `"30s"` or
   `"1h"`. This value cannot be larger than `max_lease_ttl`.
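Note (illustrative reading of the documentation change, not part of the patch): with this option, setting `log_level = "debug"` in the server configuration file and sending the running Vault process a SIGHUP should raise most registered subsystem loggers to debug without a restart, subject to the caveat above that secrets/auth plugin loggers are not updated dynamically.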