Logger cleanup (#5480)
commit b47e648ddf
parent 1b8b9a49d4
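Normalize log lines for structured logging: drop hard-coded subsystem prefixes ("core:", "zookeeper:", "physical.gcs:", ...) that duplicate the logger's name, lowercase the static message text, and move variable data out of the message string into key/value pairs.

A minimal sketch of the convention the diff converges on, assuming hclog (github.com/hashicorp/go-hclog), the logging interface behind the Logger() calls below; the logger name and path value are illustrative only:

	package main

	import (
		log "github.com/hashicorp/go-hclog"
	)

	func main() {
		// The subsystem now identifies itself via the logger's name
		// rather than a prefix repeated in every message ("migrate"
		// here is illustrative).
		logger := log.New(&log.LoggerOptions{Name: "migrate"})

		path := "secret/foo" // illustrative value

		// Before: variable data concatenated into the message string.
		logger.Info("copied key: " + path)

		// After: short static message, data as key/value pairs.
		logger.Info("copied key", "path", path)
	}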
@@ -239,7 +239,7 @@ reply:
 	}}
 	if retErr != nil {
 		if b.Logger().IsWarn() {
-			b.Logger().Warn("Possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
+			b.Logger().Warn("possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
 		}
 	}
 	retErr = nil
@@ -213,7 +213,7 @@ func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.B
 		if err := to.Put(ctx, entry); err != nil {
 			return errwrap.Wrapf("error writing entry: {{err}}", err)
 		}
-		c.logger.Info("copied key: " + path)
+		c.logger.Info("copied key", "path", path)
 		return nil
 	})
 }
@@ -1805,7 +1805,7 @@ func (c *ServerCommand) migrationActive(backend physical.Backend) bool {
 			// unexpected state, so stop buffering log messages
 			c.logGate.Flush()
 		}
-		c.logger.Warn("migration_check: " + err.Error())
+		c.logger.Warn("migration check error", "error", err.Error())
 
 		select {
 		case <-time.After(2 * time.Second):
@@ -209,7 +209,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32)
 			// Check for panics, otherwise notify we're done
 			defer func() {
 				if err := recover(); err != nil {
-					core.Logger().Error("got a panic: %v", err)
+					core.Logger().Error("got a panic", "error", err)
 					t.Fail()
 				}
 				atomic.AddUint32(totalOps, myTotalOps)
@@ -475,9 +475,9 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
 	statusCode, header, retBytes, err := core.ForwardRequest(r)
 	if err != nil {
 		if err == vault.ErrCannotForward {
-			core.Logger().Debug("handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
+			core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back")
 		} else {
-			core.Logger().Error("handleRequestForwarding: error forwarding request", "error", err)
+			core.Logger().Error("forward request error", "error", err)
 		}
 
 		// Fall back to redirection
@@ -76,7 +76,7 @@ func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request)
 		barrierConfig.SecretShares = 1
 		barrierConfig.SecretThreshold = 1
 		barrierConfig.StoredShares = 1
-		core.Logger().Warn("init: stored keys supported, forcing shares/threshold to 1")
+		core.Logger().Warn("stored keys supported on init, forcing shares/threshold to 1")
 	} else {
 		if barrierConfig.StoredShares > 0 {
 			respondError(w, http.StatusBadRequest, fmt.Errorf("stored keys are not supported by the current seal type"))
@@ -144,7 +144,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error
 	// Client
 	opts := []option.ClientOption{option.WithUserAgent(useragent.String())}
 	if credentialsFile := c["credentials_file"]; credentialsFile != "" {
-		logger.Warn("physical.gcs: specifying credentials_file as an option is " +
+		logger.Warn("specifying credentials_file as an option is " +
 			"deprecated. Please use the GOOGLE_APPLICATION_CREDENTIALS environment " +
 			"variable or instance credentials instead.")
 		opts = append(opts, option.WithServiceAccountFile(credentialsFile))
@@ -629,15 +629,15 @@ func (i *ZooKeeperHALock) Unlock() error {
 	var err error
 
 	if err = i.unlockInternal(); err != nil {
-		i.logger.Error("zookeeper: failed to release distributed lock", "error", err)
+		i.logger.Error("failed to release distributed lock", "error", err)
 
 		go func(i *ZooKeeperHALock) {
 			attempts := 0
-			i.logger.Info("zookeeper: launching automated distributed lock release")
+			i.logger.Info("launching automated distributed lock release")
 
 			for {
 				if err := i.unlockInternal(); err == nil {
-					i.logger.Info("zookeeper: distributed lock released")
+					i.logger.Info("distributed lock released")
 					return
 				}
 
@@ -645,7 +645,7 @@ func (i *ZooKeeperHALock) Unlock() error {
 				case <-time.After(time.Second):
 					attempts := attempts + 1
 					if attempts >= 10 {
-						i.logger.Error("zookeeper: release lock max attempts reached. Lock may not be released", "error", err)
+						i.logger.Error("release lock max attempts reached. Lock may not be released", "error", err)
 						return
 					}
 					continue
@@ -187,7 +187,7 @@ func (c *Core) setupExpiration(e ExpireLeaseStrategy) error {
 	errorFunc := func() {
 		c.logger.Error("shutting down")
 		if err := c.Shutdown(); err != nil {
-			c.logger.Error("error shutting down core: %v", err)
+			c.logger.Error("error shutting down core", "error", err)
 		}
 	}
 	go c.expiration.Restore(errorFunc)
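Note on the hunk above: hclog's Error is not printf-style, so the old call never interpolated err into "%v"; the trailing argument was picked up as an unpaired key/value. A hedged sketch of the difference (exact rendering of the odd argument varies by hclog version):

	package main

	import (
		"errors"

		log "github.com/hashicorp/go-hclog"
	)

	func main() {
		logger := log.Default()
		err := errors.New("disk full")

		// "%v" stays literal; err dangles as an odd key/value argument
		// (rendered as EXTRA_VALUE_AT_END in recent hclog versions).
		logger.Error("error shutting down core: %v", err)

		// Structured form: the value is attached to an explicit key.
		logger.Error("error shutting down core", "error", err)
	}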
|
@ -1019,7 +1019,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string,
|
|||
|
||||
mountEntry := b.Core.router.MatchingMountEntry(ctx, path)
|
||||
if mountEntry == nil {
|
||||
b.Backend.Logger().Error("tune failed: no mount entry found", "path", path)
|
||||
b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path)
|
||||
return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path))
|
||||
}
|
||||
if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
|
||||
|
@@ -1040,7 +1040,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string,
 	// Check again after grabbing the lock
 	mountEntry = b.Core.router.MatchingMountEntry(ctx, path)
 	if mountEntry == nil {
-		b.Backend.Logger().Error("tune failed: no mount entry found", "path", path)
+		b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path)
 		return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path))
 	}
 	if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
@@ -166,14 +166,14 @@ func (c *Core) startForwarding(ctx context.Context) error {
 		}()
 
 		if c.logger.IsInfo() {
-			c.logger.Info("core/startClusterListener: starting listener", "listener_address", laddr)
+			c.logger.Info("starting listener", "listener_address", laddr)
 		}
 
 		// Create a TCP listener. We do this separately and specifically
 		// with TCP so that we can set deadlines.
 		tcpLn, err := net.ListenTCP("tcp", laddr)
 		if err != nil {
-			c.logger.Error("core/startClusterListener: error starting listener", "error", err)
+			c.logger.Error("error starting listener", "error", err)
 			return
 		}
 
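The IsInfo() guards kept above are optional with hclog, which checks the level inside each call; an explicit guard pays off mainly when building the log arguments is itself expensive. A sketch of that pattern, assuming hclog (expensiveSummary is a hypothetical helper):

	package main

	import (
		"fmt"
		"time"

		log "github.com/hashicorp/go-hclog"
	)

	// expensiveSummary stands in for argument construction worth
	// skipping when the level is disabled (hypothetical helper).
	func expensiveSummary() string {
		time.Sleep(10 * time.Millisecond) // simulate costly work
		return fmt.Sprintf("listeners=%d", 1)
	}

	func main() {
		logger := log.New(&log.LoggerOptions{Name: "core", Level: log.Info})

		// Debug is disabled at Info level, so the guard skips the
		// expensive call entirely.
		if logger.IsDebug() {
			logger.Debug("cluster state", "summary", expensiveSummary())
		}
	}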
@@ -182,7 +182,7 @@ func (c *Core) startForwarding(ctx context.Context) error {
 		defer tlsLn.Close()
 
 		if c.logger.IsInfo() {
-			c.logger.Info("core/startClusterListener: serving cluster requests", "cluster_listen_address", tlsLn.Addr())
+			c.logger.Info("serving cluster requests", "cluster_listen_address", tlsLn.Addr())
 		}
 
 		for {
@@ -41,7 +41,7 @@ func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *fo
 			const size = 64 << 10
 			buf := make([]byte, size)
 			buf = buf[:runtime.Stack(buf, false)]
-			s.core.logger.Error("forwarding: panic serving request", "path", req.URL.Path, "error", err, "stacktrace", string(buf))
+			s.core.logger.Error("panic serving forwarded request", "path", req.URL.Path, "error", err, "stacktrace", string(buf))
 		}
 	}()
 	s.handler.ServeHTTP(w, req)
@@ -851,7 +851,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re
 		NonHMACReqDataKeys: nonHMACReqDataKeys,
 	}
 	if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
-		c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
+		c.logger.Error("failed to audit request", "path", req.Path, "error", err)
 		return nil, nil, ErrInternalError
 	}
 