Revert "Automatically track subloggers in allLoggers (#22038)" (#24005)

This reverts commit 4c8cc87794ed2d989f515cd30c1c1b953d092ef3.
Authored by Hamid Ghaf on 2023-11-03 14:40:17 -07:00; committed by GitHub.
parent 759b1a7c02
commit 22553906fb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 72 additions and 144 deletions
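Context for the revert: #22038 had replaced Vault's manual sublogger bookkeeping — an explicit `c.allLoggers = append(...)` or `c.AddLogger(...)` at every site that calls `Named()` — with a `SubloggerHook` registered on the base logger, so new subloggers were tracked automatically and level changes on reload could reach them. This commit restores the manual pattern throughout. A minimal sketch of the two approaches, using simplified types rather than Vault's actual ones (it assumes a go-hclog version that has the `SubloggerHook` option, which this diff uses):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

type core struct {
	allLoggers []hclog.Logger
}

// Manual tracking (restored by this revert): every call site that creates a
// sublogger must also remember to register it.
func manual(c *core, base hclog.Logger) hclog.Logger {
	sub := base.Named("storage")
	c.allLoggers = append(c.allLoggers, sub) // easy to forget at new call sites
	return sub
}

// Hook-based tracking (removed by this revert): the base logger reports every
// sublogger it creates, so call sites need no registration code.
func hooked(c *core) hclog.InterceptLogger {
	return hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:              "vault",
		IndependentLevels: true,
		SubloggerHook: func(sub hclog.Logger) hclog.Logger {
			c.allLoggers = append(c.allLoggers, sub)
			return sub
		},
	})
}

func main() {
	c := &core{}
	hookedBase := hooked(c)
	hookedBase.Named("expiration") // tracked automatically via the hook

	plainBase := hclog.New(&hclog.LoggerOptions{Name: "vault"})
	manual(c, plainBase) // tracked only because the call site appends

	hookedBase.Info("tracked subloggers", "count", len(c.allLoggers)) // 2
}
```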

View File

@@ -1,3 +0,0 @@
-```release-note:bug
-core: All subloggers now reflect configured log level on reload.
-```

View File

@ -40,7 +40,6 @@ import (
"github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/builtinplugins"
"github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/experiments"
"github.com/hashicorp/vault/helper/logging"
loghelper "github.com/hashicorp/vault/helper/logging" loghelper "github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/namespace"
@ -121,7 +120,6 @@ type ServerCommand struct {
licenseReloadedCh chan (error) // for tests licenseReloadedCh chan (error) // for tests
allLoggers []hclog.Logger allLoggers []hclog.Logger
logging.SubloggerAdder
flagConfigs []string flagConfigs []string
flagRecovery bool flagRecovery bool
@ -443,26 +441,6 @@ func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError,
return config, configErrors, nil return config, configErrors, nil
} }
// AppendToAllLoggers is registered with the base logger to handle creation of
// new subloggers through the phases of server startup. There are three phases
// we need to handle: (1) Before CoreConfig is created, new subloggers are added
// to c.allLoggers; (2) After CoreConfig is created, new subloggers are added to
// CoreConfig.AllLoggers; (3) After Core instantiation, new subloggers are
// appended to Core.allLoggers. This logic is managed by the SubloggerAdder
// interface.
//
// NOTE: Core.allLoggers must be set to CoreConfig.allLoggers after NewCore to
// keep track of new subloggers added before c.SubloggerAdder gets reassigned to
// the Core implementation.
func (c *ServerCommand) AppendToAllLoggers(sub hclog.Logger) hclog.Logger {
if c.SubloggerAdder == nil {
c.allLoggers = append(c.allLoggers, sub)
return sub
}
return c.SubloggerHook(sub)
}
func (c *ServerCommand) runRecoveryMode() int { func (c *ServerCommand) runRecoveryMode() int {
config, configErrors, err := c.parseConfig() config, configErrors, err := c.parseConfig()
if err != nil { if err != nil {
@ -608,7 +586,6 @@ func (c *ServerCommand) runRecoveryMode() int {
DisableMlock: config.DisableMlock, DisableMlock: config.DisableMlock,
RecoveryMode: c.flagRecovery, RecoveryMode: c.flagRecovery,
ClusterAddr: config.ClusterAddr, ClusterAddr: config.ClusterAddr,
AllLoggers: c.allLoggers,
} }
core, newCoreError := vault.NewCore(coreConfig) core, newCoreError := vault.NewCore(coreConfig)
@ -832,6 +809,7 @@ func (c *ServerCommand) setupStorage(config *server.Config) (physical.Backend, e
} }
namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
c.allLoggers = append(c.allLoggers, namedStorageLogger)
backend, err := factory(config.Storage.Config, namedStorageLogger) backend, err := factory(config.Storage.Config, namedStorageLogger)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error initializing storage of type %s: %w", config.Storage.Type, err) return nil, fmt.Errorf("Error initializing storage of type %s: %w", config.Storage.Type, err)
@ -847,6 +825,7 @@ func beginServiceRegistration(c *ServerCommand, config *server.Config) (sr.Servi
} }
namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type) namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type)
c.allLoggers = append(c.allLoggers, namedSDLogger)
// Since we haven't even begun starting Vault's core yet, // Since we haven't even begun starting Vault's core yet,
// we know that Vault is in its pre-running state. // we know that Vault is in its pre-running state.
@ -1125,6 +1104,7 @@ func (c *ServerCommand) Run(args []string) int {
// create GRPC logger // create GRPC logger
namedGRPCLogFaker := c.logger.Named("grpclogfaker") namedGRPCLogFaker := c.logger.Named("grpclogfaker")
c.allLoggers = append(c.allLoggers, namedGRPCLogFaker)
grpclog.SetLogger(&grpclogFaker{ grpclog.SetLogger(&grpclogFaker{
logger: namedGRPCLogFaker, logger: namedGRPCLogFaker,
log: os.Getenv("VAULT_GRPC_LOGGING") != "", log: os.Getenv("VAULT_GRPC_LOGGING") != "",
@ -1277,10 +1257,6 @@ func (c *ServerCommand) Run(args []string) int {
return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
} }
// Keep track of new subloggers in coreConfig.AllLoggers until we hand it
// off to core
c.SubloggerAdder = &coreConfig
if c.flagDevFourCluster { if c.flagDevFourCluster {
return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
} }
@ -1368,10 +1344,6 @@ func (c *ServerCommand) Run(args []string) int {
} }
// Now we can use the core SubloggerHook to add any new subloggers to
// core.allLoggers
c.SubloggerAdder = core
// Copy the reload funcs pointers back // Copy the reload funcs pointers back
c.reloadFuncs = coreConfig.ReloadFuncs c.reloadFuncs = coreConfig.ReloadFuncs
c.reloadFuncsLock = coreConfig.ReloadFuncsLock c.reloadFuncsLock = coreConfig.ReloadFuncsLock
@ -1850,7 +1822,6 @@ func (c *ServerCommand) configureLogging(config *server.Config) (hclog.Intercept
LogRotateDuration: logRotateDuration, LogRotateDuration: logRotateDuration,
LogRotateBytes: config.LogRotateBytes, LogRotateBytes: config.LogRotateBytes,
LogRotateMaxFiles: config.LogRotateMaxFiles, LogRotateMaxFiles: config.LogRotateMaxFiles,
SubloggerHook: c.AppendToAllLoggers,
} }
return loghelper.Setup(logCfg, c.logWriter) return loghelper.Setup(logCfg, c.logWriter)
@ -2558,6 +2529,7 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma
var seal vault.Seal var seal vault.Seal
sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
c.allLoggers = append(c.allLoggers, sealLogger)
defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper())) defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper()))
var sealInfoKeys []string var sealInfoKeys []string
sealInfoMap := map[string]string{} sealInfoMap := map[string]string{}
@ -2612,6 +2584,7 @@ func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.Co
} }
namedHALogger := c.logger.Named("ha." + config.HAStorage.Type) namedHALogger := c.logger.Named("ha." + config.HAStorage.Type)
c.allLoggers = append(c.allLoggers, namedHALogger)
habackend, err := factory(config.HAStorage.Config, namedHALogger) habackend, err := factory(config.HAStorage.Config, namedHALogger)
if err != nil { if err != nil {
return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err) return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err)
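The deleted `AppendToAllLoggers` above encodes a three-phase handoff: subloggers buffer in `c.allLoggers` until a `CoreConfig` exists, then flow into `CoreConfig.AllLoggers`, then into `Core.allLoggers` once `NewCore` returns. Compressed into a standalone sketch (illustrative names, not the exact Vault code):

```go
package main

import hclog "github.com/hashicorp/go-hclog"

// subloggerAdder mirrors the helper/logging interface this revert deletes.
type subloggerAdder interface {
	SubloggerHook(sub hclog.Logger) hclog.Logger
}

type serverCommand struct {
	allLoggers []hclog.Logger // phase 1 buffer, before CoreConfig exists
	adder      subloggerAdder // later points at CoreConfig, then at Core
}

// appendToAllLoggers is the hook handed to the base logger. It buffers
// subloggers locally until an adder is registered, then forwards to it.
func (s *serverCommand) appendToAllLoggers(sub hclog.Logger) hclog.Logger {
	if s.adder == nil {
		s.allLoggers = append(s.allLoggers, sub)
		return sub
	}
	return s.adder.SubloggerHook(sub)
}

func main() {
	s := &serverCommand{}
	base := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:          "vault",
		SubloggerHook: s.appendToAllLoggers,
	})
	base.Named("storage.raft") // phase 1: buffered in s.allLoggers
	// Phases 2 and 3 would reassign s.adder to the CoreConfig, then the Core.
}
```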

View File

@@ -48,17 +48,6 @@ type LogConfig struct {
 	// LogRotateMaxFiles is the maximum number of past archived log files to keep
 	LogRotateMaxFiles int
-
-	// SubloggerHook handles creation of new subloggers, automatically appending
-	// them to core's running list of allLoggers.
-	// see: server.AppendToAllLoggers for more details.
-	SubloggerHook func(log.Logger) log.Logger
-}
-
-// SubloggerAdder is an interface which facilitates tracking of new subloggers
-// added between phases of server startup.
-type SubloggerAdder interface {
-	SubloggerHook(logger log.Logger) log.Logger
 }
 
 func (c *LogConfig) isLevelInvalid() bool {
@@ -159,7 +148,6 @@ func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) {
 		IndependentLevels: true,
 		Output:            io.MultiWriter(writers...),
 		JSONFormat:        config.isFormatJson(),
-		SubloggerHook:     config.SubloggerHook,
 	})
 
 	return logger, nil
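Worth noting why a tracked logger list exists at all: Vault builds its root logger with `IndependentLevels: true`, so each `Named()` child keeps its own level and a later `SetLevel` on the root does not propagate. Reload-driven level changes therefore have to iterate an explicit list of subloggers, however that list is maintained. A small standalone demonstration of the behavior (not Vault code):

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	root := hclog.New(&hclog.LoggerOptions{
		Name:              "root",
		Level:             hclog.Info,
		Output:            os.Stderr,
		IndependentLevels: true,
	})
	child := root.Named("child") // starts at Info, with its own level

	root.SetLevel(hclog.Debug)
	child.Debug("dropped: the child kept its independent Info level")

	// With a tracked list, a reload can reach every logger explicitly.
	for _, l := range []hclog.Logger{root, child} {
		l.SetLevel(hclog.Debug)
	}
	child.Debug("now visible")
}
```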

View File

@ -18,7 +18,6 @@ import (
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/builtin/credential/approle" "github.com/hashicorp/vault/builtin/credential/approle"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/plugins/database/mysql" "github.com/hashicorp/vault/plugins/database/mysql"
"github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/consts"
@ -391,32 +390,6 @@ type TestLogger struct {
Path string Path string
File *os.File File *os.File
sink hclog.SinkAdapter sink hclog.SinkAdapter
// For managing temporary start-up state
sync.RWMutex
AllLoggers []hclog.Logger
logging.SubloggerAdder
}
// RegisterSubloggerAdder checks to see if the provided logger interface is a
// TestLogger and re-assigns the SubloggerHook implementation if so.
func RegisterSubloggerAdder(logger hclog.Logger, adder logging.SubloggerAdder) {
if l, ok := logger.(*TestLogger); ok {
l.Lock()
l.SubloggerAdder = adder
l.Unlock()
}
}
// AppendToAllLoggers appends the sub logger to allLoggers, or if the TestLogger
// is assigned to a SubloggerAdder implementation, it calls the underlying hook.
func (l *TestLogger) AppendToAllLoggers(sub hclog.Logger) hclog.Logger {
l.Lock()
defer l.Unlock()
if l.SubloggerAdder == nil {
l.AllLoggers = append(l.AllLoggers, sub)
return sub
}
return l.SubloggerHook(sub)
} }
func NewTestLogger(t testing.T) *TestLogger { func NewTestLogger(t testing.T) *TestLogger {
@ -440,31 +413,25 @@ func NewTestLogger(t testing.T) *TestLogger {
output = logFile output = logFile
} }
sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
Output: output,
Level: hclog.Trace,
IndependentLevels: true,
})
testLogger := &TestLogger{
Path: logPath,
File: logFile,
sink: sink,
}
// We send nothing on the regular logger, that way we can later deregister // We send nothing on the regular logger, that way we can later deregister
// the sink to stop logging during cluster cleanup. // the sink to stop logging during cluster cleanup.
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
Output: io.Discard, Output: io.Discard,
IndependentLevels: true, IndependentLevels: true,
Name: t.Name(), Name: t.Name(),
SubloggerHook: testLogger.AppendToAllLoggers,
}) })
sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
Output: output,
Level: hclog.Trace,
IndependentLevels: true,
})
logger.RegisterSink(sink) logger.RegisterSink(sink)
testLogger.InterceptLogger = logger return &TestLogger{
Path: logPath,
return testLogger File: logFile,
InterceptLogger: logger,
sink: sink,
}
} }
func (tl *TestLogger) StopLogging() { func (tl *TestLogger) StopLogging() {
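Independent of the revert, the constructor keeps the property the comment describes: the intercept logger itself writes to `io.Discard`, and all visible output flows through a registered sink, so `StopLogging` can silence a cluster by deregistering it. A standalone sketch of that mechanism:

```go
package main

import (
	"io"
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// The logger writes nothing on its own; output exists only via sinks.
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:              "test",
		Output:            io.Discard,
		IndependentLevels: true,
	})
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Output:            os.Stdout,
		Level:             hclog.Trace,
		IndependentLevels: true,
	})

	logger.RegisterSink(sink)
	logger.Info("visible through the sink")

	logger.DeregisterSink(sink) // what StopLogging does during cleanup
	logger.Info("silently dropped")
}
```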

View File

@@ -1101,6 +1101,7 @@ func (c *Core) setupActivityLog(ctx context.Context, wg *sync.WaitGroup) error {
 // this function should be called with activityLogLock.
 func (c *Core) setupActivityLogLocked(ctx context.Context, wg *sync.WaitGroup) error {
 	logger := c.baseLogger.Named("activity")
+	c.AddLogger(logger)
 
 	if os.Getenv("VAULT_DISABLE_ACTIVITY_LOG") != "" {
 		if c.CensusLicensingEnabled() {

View File

@@ -381,6 +381,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo
 // initialize the audit backends
 func (c *Core) setupAudits(ctx context.Context) error {
 	brokerLogger := c.baseLogger.Named("audit")
+	c.AddLogger(brokerLogger)
 	broker := NewAuditBroker(brokerLogger)
 
 	c.auditLock.Lock()
@@ -489,6 +490,7 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
 	}
 
 	auditLogger := c.baseLogger.Named("audit")
+	c.AddLogger(auditLogger)
 
 	switch entry.Type {
 	case "file":

View File

@@ -997,6 +997,7 @@ func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysV
 	conf["plugin_version"] = entry.Version
 
 	authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor))
+	c.AddLogger(authLogger)
 	pluginEventSender, err := c.events.WithPlugin(entry.namespace, &logical.EventPluginInfo{
 		MountClass:    consts.PluginTypeCredential.String(),
 		MountAccessor: entry.Accessor,

View File

@@ -323,6 +323,7 @@ func (c *Core) startClusterListener(ctx context.Context) error {
 	if networkLayer == nil {
 		tcpLogger := c.logger.Named("cluster-listener.tcp")
 		networkLayer = cluster.NewTCPLayer(c.clusterListenerAddrs, tcpLogger)
+		c.AddLogger(tcpLogger)
 	}
 
 	listenerLogger := c.logger.Named("cluster-listener")
@@ -331,6 +332,8 @@ func (c *Core) startClusterListener(ctx context.Context) error {
 		listenerLogger,
 		5*c.clusterHeartbeatInterval))
 
+	c.AddLogger(listenerLogger)
+
 	err := c.getClusterListener().Run(ctx)
 	if err != nil {
 		return err

View File

@@ -877,13 +877,6 @@ type CoreConfig struct {
 	NumRollbackWorkers int
 }
 
-// SubloggerHook implements the SubloggerAdder interface. This implementation
-// manages CoreConfig.AllLoggers state prior to (and during) NewCore.
-func (c *CoreConfig) SubloggerHook(logger log.Logger) log.Logger {
-	c.AllLoggers = append(c.AllLoggers, logger)
-	return logger
-}
-
 // GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
 // not exist.
 func (c *CoreConfig) GetServiceRegistration() sr.ServiceRegistration {
@@ -1071,7 +1064,10 @@ func CreateCore(conf *CoreConfig) (*Core, error) {
 	c.shutdownDoneCh.Store(make(chan struct{}))
 
+	c.allLoggers = append(c.allLoggers, c.logger)
+
 	c.router.logger = c.logger.Named("router")
+	c.allLoggers = append(c.allLoggers, c.router.logger)
 
 	c.inFlightReqData = &InFlightRequests{
 		InFlightReqMap: &sync.Map{},
@@ -1219,6 +1215,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	// MFA method
 	c.loginMFABackend = NewLoginMFABackend(c, conf.Logger)
 
+	if c.loginMFABackend.mfaLogger != nil {
+		c.AddLogger(c.loginMFABackend.mfaLogger)
+	}
 
 	// Logical backends
 	c.configureLogicalBackends(conf.LogicalBackends, conf.Logger, conf.AdministrativeNamespacePath)
@@ -1244,6 +1243,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	// Quotas
 	quotasLogger := conf.Logger.Named("quotas")
+	c.allLoggers = append(c.allLoggers, quotasLogger)
 
 	detectDeadlocks := false
 	for _, v := range c.detectDeadlocks {
@@ -1269,7 +1269,10 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 	}
 
 	// Events
-	events, err := eventbus.NewEventBus(conf.Logger.Named("events"))
+	eventsLogger := conf.Logger.Named("events")
+	c.allLoggers = append(c.allLoggers, eventsLogger)
+	// start the event system
+	events, err := eventbus.NewEventBus(eventsLogger)
 	if err != nil {
 		return nil, err
 	}
@@ -1278,11 +1281,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
 		c.events.Start()
 	}
 
-	// Make sure we're keeping track of the subloggers added above. We haven't
-	// yet registered core to the server command's SubloggerAdder, so any new
-	// subloggers will be in conf.AllLoggers.
-	c.allLoggers = conf.AllLoggers
-
 	return c, nil
 }
@@ -1341,7 +1339,10 @@ func (c *Core) configureCredentialsBackends(backends map[string]logical.Factory,
 	}
 
 	credentialBackends[mountTypeToken] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewTokenStore(ctx, logger.Named("token"), c, config)
+		tsLogger := logger.Named("token")
+		c.AddLogger(tsLogger)
+		return NewTokenStore(ctx, tsLogger, c, config)
 	}
 
 	c.credentialBackends = credentialBackends
@@ -1369,7 +1370,9 @@ func (c *Core) configureLogicalBackends(backends map[string]logical.Factory, log
 	// System
 	logicalBackends[mountTypeSystem] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		b := NewSystemBackend(c, logger.Named("system"))
+		sysBackendLogger := logger.Named("system")
+		c.AddLogger(sysBackendLogger)
+		b := NewSystemBackend(c, sysBackendLogger)
 		if err := b.Setup(ctx, config); err != nil {
 			return nil, err
 		}
@@ -1378,7 +1381,9 @@ func (c *Core) configureLogicalBackends(backends map[string]logical.Factory, log
 	// Identity
 	logicalBackends[mountTypeIdentity] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
-		return NewIdentityStore(ctx, c, config, logger.Named("identity"))
+		identityLogger := logger.Named("identity")
+		c.AddLogger(identityLogger)
+		return NewIdentityStore(ctx, c, config, identityLogger)
 	}
 
 	c.logicalBackends = logicalBackends
@@ -3155,14 +3160,6 @@ func (c *Core) AddLogger(logger log.Logger) {
 	c.allLoggers = append(c.allLoggers, logger)
 }
 
-// SubloggerHook implements the SubloggerAdder interface. We add this method to
-// the server command after NewCore returns with a Core object. The hook keeps
-// track of newly added subloggers without manual calls to c.AddLogger.
-func (c *Core) SubloggerHook(logger log.Logger) log.Logger {
-	c.AddLogger(logger)
-	return logger
-}
-
 // SetLogLevel sets logging level for all tracked loggers to the level provided
 func (c *Core) SetLogLevel(level log.Level) {
 	c.allLoggersLock.RLock()
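With the hook gone, `Core.AddLogger` is again the single registration point, and `SetLogLevel` walks the accumulated slice under a read lock. The shape of that pair, reconstructed as a sketch from the fragments visible in this diff (simplified, standalone types):

```go
package main

import (
	"sync"

	hclog "github.com/hashicorp/go-hclog"
)

type Core struct {
	allLoggersLock sync.RWMutex
	allLoggers     []hclog.Logger
}

// AddLogger registers a sublogger so later level changes can reach it.
func (c *Core) AddLogger(logger hclog.Logger) {
	c.allLoggersLock.Lock()
	defer c.allLoggersLock.Unlock()
	c.allLoggers = append(c.allLoggers, logger)
}

// SetLogLevel applies a level to every tracked logger, e.g. on config reload.
func (c *Core) SetLogLevel(level hclog.Level) {
	c.allLoggersLock.RLock()
	defer c.allLoggersLock.RUnlock()
	for _, l := range c.allLoggers {
		l.SetLevel(level)
	}
}

func main() {
	c := &Core{}
	c.AddLogger(hclog.New(&hclog.LoggerOptions{Name: "expiration"}))
	c.SetLogLevel(hclog.Debug) // reaches only what was registered
}
```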

View File

@@ -40,9 +40,11 @@ func coreInit(c *Core, conf *CoreConfig) error {
 	phys := conf.Physical
 	_, txnOK := phys.(physical.Transactional)
 	sealUnwrapperLogger := conf.Logger.Named("storage.sealunwrapper")
+	c.allLoggers = append(c.allLoggers, sealUnwrapperLogger)
 	c.sealUnwrapper = NewSealUnwrapper(phys, sealUnwrapperLogger)
 	// Wrap the physical backend in a cache layer if enabled
 	cacheLogger := c.baseLogger.Named("storage.cache")
+	c.allLoggers = append(c.allLoggers, cacheLogger)
 	if txnOK {
 		c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger, c.MetricSink().Sink)
 	} else {

View File

@@ -332,6 +332,8 @@ func NewExpirationManager(c *Core, view *BarrierView, e ExpireLeaseStrategy, log
 	jobManager := fairshare.NewJobManager("expire", getNumExpirationWorkers(c, logger), managerLogger, c.metricSink)
 	jobManager.Start()
 
+	c.AddLogger(managerLogger)
+
 	exp := &ExpirationManager{
 		core:   c,
 		router: c.router,
@@ -396,6 +398,7 @@ func (c *Core) setupExpiration(e ExpireLeaseStrategy) error {
 	// Create the manager
 	expLogger := c.baseLogger.Named("expiration")
+	c.AddLogger(expLogger)
 
 	detectDeadlocks := false
 	for _, v := range c.detectDeadlocks {
@@ -559,6 +562,7 @@ func (m *ExpirationManager) Tidy(ctx context.Context) error {
 	var tidyErrors *multierror.Error
 
 	logger := m.logger.Named("tidy")
+	m.core.AddLogger(logger)
 
 	if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
 		logger.Warn("tidy operation on leases is already in progress")

View File

@@ -75,8 +75,11 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo
 	}
 
 	entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
+	core.AddLogger(entitiesPackerLogger)
 	localAliasesPackerLogger := iStore.logger.Named("storagepacker").Named("local-aliases")
+	core.AddLogger(localAliasesPackerLogger)
 	groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
+	core.AddLogger(groupsPackerLogger)
 
 	iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
 	if err != nil {

View File

@ -33,6 +33,7 @@ import (
"github.com/hashicorp/vault/sdk/helper/compressutil" "github.com/hashicorp/vault/sdk/helper/compressutil"
"github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/helper/pluginutil"
"github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
"github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/logical"
@ -5555,13 +5556,6 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
true, true,
false, false,
}, },
{
"events",
"invalid",
"does-not-matter",
true,
false,
},
{ {
"", "",
"info", "info",
@ -5584,9 +5578,10 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
t.Run(fmt.Sprintf("loggers-by-name-%s", tc.logger), func(t *testing.T) { t.Run(fmt.Sprintf("loggers-by-name-%s", tc.logger), func(t *testing.T) {
t.Parallel() t.Parallel()
core, _, _ := TestCoreUnsealed(t) core, _, _ := TestCoreUnsealedWithConfig(t, &CoreConfig{
Logger: logging.NewVaultLogger(hclog.Trace),
})
b := core.systemBackend b := core.systemBackend
testLoggerName := t.Name() + "." + tc.logger
// Test core overrides logging level outside of config, // Test core overrides logging level outside of config,
// an initial delete will ensure that we an initial read // an initial delete will ensure that we an initial read
@ -5615,7 +5610,7 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
initialLoggers := resp.Data initialLoggers := resp.Data
req = &logical.Request{ req = &logical.Request{
Path: fmt.Sprintf("loggers/%s", testLoggerName), Path: fmt.Sprintf("loggers/%s", tc.logger),
Operation: logical.UpdateOperation, Operation: logical.UpdateOperation,
Data: map[string]interface{}{ Data: map[string]interface{}{
"level": tc.level, "level": tc.level,
@ -5660,14 +5655,14 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
t.Fatalf("expected logger %q to be %q, actual: %s", loggerName, tc.expectedLevel, levelStr) t.Fatalf("expected logger %q to be %q, actual: %s", loggerName, tc.expectedLevel, levelStr)
} }
if loggerName != testLoggerName && levelStr != initialLevelStr { if loggerName != tc.logger && levelStr != initialLevelStr {
t.Errorf("expected level of logger %q to be unchanged, expected: %s, actual: %s", loggerName, initialLevelStr, levelStr) t.Errorf("expected level of logger %q to be unchanged, exepcted: %s, actual: %s", loggerName, initialLevelStr, levelStr)
} }
} }
} }
req = &logical.Request{ req = &logical.Request{
Path: fmt.Sprintf("loggers/%s", testLoggerName), Path: fmt.Sprintf("loggers/%s", tc.logger),
Operation: logical.DeleteOperation, Operation: logical.DeleteOperation,
} }
@ -5684,7 +5679,7 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
if !tc.expectDeleteError { if !tc.expectDeleteError {
req = &logical.Request{ req = &logical.Request{
Path: fmt.Sprintf("loggers/%s", testLoggerName), Path: fmt.Sprintf("loggers/%s", tc.logger),
Operation: logical.ReadOperation, Operation: logical.ReadOperation,
} }
@ -5693,18 +5688,18 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
t.Fatalf("unexpected error, err: %v, resp: %#v", err, resp) t.Fatalf("unexpected error, err: %v, resp: %#v", err, resp)
} }
currentLevel, ok := resp.Data[testLoggerName].(string) currentLevel, ok := resp.Data[tc.logger].(string)
if !ok { if !ok {
t.Fatalf("expected resp to include %q, resp: %#v", testLoggerName, resp) t.Fatalf("expected resp to include %q, resp: %#v", tc.logger, resp)
} }
initialLevel, ok := initialLoggers[testLoggerName].(string) initialLevel, ok := initialLoggers[tc.logger].(string)
if !ok { if !ok {
t.Fatalf("expected initial loggers to include %q, resp: %#v", testLoggerName, initialLoggers) t.Fatalf("expected initial loggers to include %q, resp: %#v", tc.logger, initialLoggers)
} }
if currentLevel != initialLevel { if currentLevel != initialLevel {
t.Errorf("expected level of logger %q to match original config, expected: %s, actual: %s", testLoggerName, initialLevel, currentLevel) t.Errorf("expected level of logger %q to match original config, expected: %s, actual: %s", tc.logger, initialLevel, currentLevel)
} }
} }
}) })
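This test drives the `sys/loggers` endpoints that depend on the tracked logger list. For reference, the same read/update/delete operations through Vault's public Go API client — a sketch that assumes a running, unsealed Vault reachable via the standard `VAULT_ADDR`/`VAULT_TOKEN` environment variables, and the "expiration" logger name seen elsewhere in this diff:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Raise one sublogger's level...
	if _, err := client.Logical().Write("sys/loggers/expiration",
		map[string]interface{}{"level": "debug"}); err != nil {
		log.Fatal(err)
	}

	// ...inspect all registered loggers...
	resp, err := client.Logical().Read("sys/loggers")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data["expiration"])

	// ...and revert to the configured level.
	if _, err := client.Logical().Delete("sys/loggers/expiration"); err != nil {
		log.Fatal(err)
	}
}
```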

View File

@@ -1706,6 +1706,7 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView
 	conf["plugin_version"] = entry.Version
 
 	backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor))
+	c.AddLogger(backendLogger)
 	pluginEventSender, err := c.events.WithPlugin(entry.namespace, &logical.EventPluginInfo{
 		MountClass:    consts.PluginTypeSecrets.String(),
 		MountAccessor: entry.Accessor,

View File

@@ -256,6 +256,7 @@ func (c *Core) setupPolicyStore(ctx context.Context) error {
 	var err error
 	sysView := &dynamicSystemView{core: c, perfStandby: c.perfStandby}
 	psLogger := c.baseLogger.Named("policy")
+	c.AddLogger(psLogger)
 	c.policyStore, err = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)
 	if err != nil {
 		return err

View File

@@ -360,6 +360,7 @@ func (c *Core) startPeriodicRaftTLSRotate(ctx context.Context) error {
 	c.raftTLSRotationStopCh = make(chan struct{})
 	logger := c.logger.Named("raft")
+	c.AddLogger(logger)
 
 	if c.isRaftHAOnly() {
 		return c.raftTLSRotateDirect(ctx, logger, c.raftTLSRotationStopCh)

View File

@@ -378,6 +378,7 @@ func (c *Core) startRollback() error {
 		return ret
 	}
 	rollbackLogger := c.baseLogger.Named("rollback")
+	c.AddLogger(rollbackLogger)
 	c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
 	c.rollback.Start()
 	return nil

View File

@@ -90,6 +90,7 @@ func (d *autoSeal) SetCore(core *Core) {
 	d.core = core
 	if d.logger == nil {
 		d.logger = d.core.Logger().Named("autoseal")
+		d.core.AddLogger(d.logger)
 	}
 }

View File

@@ -216,8 +216,6 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
 	// Start off with base test core config
 	conf := testCoreConfig(t, errInjector, logger)
 
-	corehelpers.RegisterSubloggerAdder(logger, conf)
-
 	// Override config values with ones that gets passed in
 	conf.EnableUI = opts.EnableUI
 	conf.EnableRaw = opts.EnableRaw
@@ -237,7 +235,6 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
 	conf.CensusAgent = opts.CensusAgent
 	conf.AdministrativeNamespacePath = opts.AdministrativeNamespacePath
 	conf.ImpreciseLeaseRoleTracking = opts.ImpreciseLeaseRoleTracking
-	conf.AllLoggers = logger.AllLoggers
 
 	if opts.Logger != nil {
 		conf.Logger = opts.Logger
@@ -272,8 +269,6 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
 		t.Fatalf("err: %s", err)
 	}
 
-	// Switch the SubloggerHook over to core
-	corehelpers.RegisterSubloggerAdder(logger, c)
-
 	return c
 }
@@ -1533,8 +1528,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
 		BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(),
 	}
 
-	corehelpers.RegisterSubloggerAdder(testCluster.Logger, coreConfig)
-
 	if base != nil {
 		coreConfig.DetectDeadlocks = TestDeadlockDetection
 		coreConfig.RawConfig = base.RawConfig
@@ -1703,8 +1696,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
 	for i := 0; i < numCores; i++ {
 		cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], testCluster.LicensePublicKey)
 
-		corehelpers.RegisterSubloggerAdder(testCluster.Logger, c)
-
 		testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup)
 		cores = append(cores, c)
 		coreConfigs = append(coreConfigs, &localConfig)
@@ -1821,7 +1812,6 @@ func GenerateListenerAddr(t testing.T, opts *TestClusterOptions, certIPs []net.I
 	if opts != nil && opts.BaseListenAddress != "" {
 		baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
 		if err != nil {
 			t.Fatal("could not parse given base IP")
 		}