various tweaks on top of the hclog work (#7165)
parent 44b921a780
commit 01ebdff2a9
@@ -61,8 +61,11 @@ func (a *Agent) resolveIdentityFromToken(secretID string) (bool, structs.ACLIden
 // so we can safely log it without handling non-critical errors at the usage site.
 func (a *Agent) aclAccessorID(secretID string) string {
 	_, ident, err := a.resolveIdentityFromToken(secretID)
+	if acl.IsErrNotFound(err) {
+		return ""
+	}
 	if err != nil {
-		a.logger.Debug("error", err)
+		a.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
 		return ""
 	}
 	if ident == nil {
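The change repeated in this and the later aclAccessorID hunks is the hclog calling convention: the first argument is the message and the rest are alternating key/value pairs, so the old Debug("error", err) left the error as an unkeyed trailing value. A minimal standalone sketch (not Consul code) of the difference:

package main

import (
	"errors"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent", Level: hclog.Debug})
	err := errors.New("ACL not found")

	// Old style: "error" becomes the message and err is an unpaired
	// trailing argument, which hclog renders under a placeholder key.
	logger.Debug("error", err)

	// New style: a descriptive message plus an explicit "error" key.
	logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
}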
@@ -1982,7 +1982,7 @@ OUTER:
 			if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
 				if acl.IsErrPermissionDenied(err) {
 					accessorID := a.aclAccessorID(agentToken)
-					a.logger.Warn("Coordinate update blocked by ACLs", "accesorID", accessorID)
+					a.logger.Warn("Coordinate update blocked by ACLs", "accessorID", accessorID)
 				} else {
 					a.logger.Error("Coordinate update error", "error", err)
 				}
@@ -784,8 +784,8 @@ func (r *ACLResolver) collectPoliciesForIdentity(identity structs.ACLIdentity, p
 		} else {
 			r.logger.Warn("policy not found for identity",
 				"policy", policyID,
 				"identity", identity.ID(),
-				"accessorID", accessorID)
+				"accessorID", accessorID,
+			)
 		}
-
 		continue
@@ -885,7 +885,6 @@ func (r *ACLResolver) collectRolesForIdentity(identity structs.ACLIdentity, role
 		}
 		r.logger.Warn("role not found for identity",
 			"role", roleID,
 			"identity", identity.ID(),
 			"accessorID", accessorID,
 		)
-
 	}
@@ -8,7 +8,6 @@ import (
 
 	"github.com/armon/go-metrics"
 	"github.com/hashicorp/consul/agent/structs"
-	"github.com/hashicorp/consul/logging"
 	"github.com/hashicorp/go-hclog"
 )
 
@@ -119,14 +118,12 @@ func (s *Server) fetchConfigEntries(lastRemoteIndex uint64) (*structs.IndexedGen
 }
 
 func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
-	replicationLogger := s.loggers.Named(logging.Replication).Named(logging.ConfigEntry)
-
 	remote, err := s.fetchConfigEntries(lastRemoteIndex)
 	if err != nil {
 		return 0, false, fmt.Errorf("failed to retrieve remote config entries: %v", err)
 	}
 
-	replicationLogger.Debug("finished fetching config entries", "amount", len(remote.Entries))
+	logger.Debug("finished fetching config entries", "amount", len(remote.Entries))
 
 	// Need to check if we should be stopping. This will be common as the fetching process is a blocking
 	// RPC which could have been hanging around for a long time and during that time leadership could
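Note the pattern in this hunk: replicateConfig stops constructing its own replication logger from s.loggers and instead uses the logger handed in as a parameter. A small standalone sketch of threading a pre-named hclog logger through a worker (names here are illustrative, not Consul's):

package main

import (
	"github.com/hashicorp/go-hclog"
)

// replicate stands in for a worker like replicateConfig: it logs only
// through the logger it was given, which is already scoped by the caller.
func replicate(logger hclog.Logger) {
	logger.Debug("finished fetching config entries", "amount", 42)
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug})

	// Named() appends a sub-scope, so output appears under
	// "consul.replication.config_entry" without the worker knowing it.
	replicate(root.Named("replication").Named("config_entry"))
}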
@@ -162,14 +159,14 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
 	// The lastRemoteIndex is not used when the entry exists either only in the local state or
 	// only in the remote state. In those situations we need to either delete it or create it.
 	if remote.QueryMeta.Index < lastRemoteIndex {
-		replicationLogger.Warn("Config Entry replication remote index moved backwards, forcing a full Config Entry sync",
+		logger.Warn("Config Entry replication remote index moved backwards, forcing a full Config Entry sync",
 			"from", lastRemoteIndex,
 			"to", remote.QueryMeta.Index,
 		)
 		lastRemoteIndex = 0
 	}
 
-	replicationLogger.Debug("Config Entry replication",
+	logger.Debug("Config Entry replication",
 		"local", len(local),
 		"remote", len(remote.Entries),
 	)
@@ -177,13 +174,13 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
 	// apply them.
 	deletions, updates := diffConfigEntries(local, remote.Entries, lastRemoteIndex)
 
-	replicationLogger.Debug("Config Entry replication",
+	logger.Debug("Config Entry replication",
 		"deletions", len(deletions),
 		"updates", len(updates),
 	)
 
 	if len(deletions) > 0 {
-		replicationLogger.Debug("Deleting local config entries",
+		logger.Debug("Deleting local config entries",
 			"deletions", len(deletions),
 		)
 
@@ -194,11 +191,11 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
 		if err != nil {
 			return 0, false, fmt.Errorf("failed to delete local config entries: %v", err)
 		}
-		replicationLogger.Debug("Config Entry replication - finished deletions")
+		logger.Debug("Config Entry replication - finished deletions")
 	}
 
 	if len(updates) > 0 {
-		replicationLogger.Debug("Updating local config entries",
+		logger.Debug("Updating local config entries",
 			"updates", len(updates),
 		)
 		exit, err := s.reconcileLocalConfig(ctx, updates, structs.ConfigEntryUpsert)
@@ -208,7 +205,7 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, lo
 		if err != nil {
 			return 0, false, fmt.Errorf("failed to update local config entries: %v", err)
 		}
-		replicationLogger.Debug("Config Entry replication - finished updates")
+		logger.Debug("Config Entry replication - finished updates")
 	}
 
 	// Return the index we got back from the remote side, since we've synced
@@ -472,8 +472,11 @@ func (s *Intention) Check(
 // so we can safely log it without handling non-critical errors at the usage site.
 func (s *Intention) aclAccessorID(secretID string) string {
 	_, ident, err := s.srv.ResolveIdentityFromToken(secretID)
+	if acl.IsErrNotFound(err) {
+		return ""
+	}
 	if err != nil {
-		s.srv.logger.Debug("error", err)
+		s.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
 		return ""
 	}
 	if ident == nil {
@@ -266,8 +266,11 @@ func (m *Internal) executeKeyringOpMgr(
 // so we can safely log it without handling non-critical errors at the usage site.
 func (m *Internal) aclAccessorID(secretID string) string {
 	_, ident, err := m.srv.ResolveIdentityFromToken(secretID)
+	if acl.IsErrNotFound(err) {
+		return ""
+	}
 	if err != nil {
-		m.srv.logger.Debug("error", err)
+		m.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
 		return ""
 	}
 	if ident == nil {
@@ -396,7 +396,7 @@ func NewServerLogger(config *Config, logger hclog.InterceptLogger, tokens *token
 		Delegate: &FunctionReplicator{ReplicateFn: s.replicateConfig},
 		Rate:     s.config.ConfigReplicationRate,
 		Burst:    s.config.ConfigReplicationBurst,
-		Logger:   s.loggers.Named(logging.Replication).Named(logging.ConfigEntry),
+		Logger:   s.logger,
 	}
 	s.configReplicator, err = NewReplicator(&configReplicatorConfig)
 	if err != nil {
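With this change, callers hand the Replicator the plain server logger; scoping it to a replication-specific name presumably moves inside the replication machinery rather than being repeated at each construction site. A hypothetical sketch of that ownership flip (struct and function names invented for illustration):

package main

import "github.com/hashicorp/go-hclog"

// ReplicatorConfig mimics a config struct that now accepts the parent
// logger directly instead of a pre-named one.
type ReplicatorConfig struct {
	Logger hclog.Logger
}

type Replicator struct {
	logger hclog.Logger
}

// NewReplicator derives the scoped logger in one place, so every caller
// can pass the same parent logger.
func NewReplicator(c *ReplicatorConfig) *Replicator {
	return &Replicator{logger: c.Logger.Named("replication")}
}

func main() {
	r := NewReplicator(&ReplicatorConfig{
		Logger: hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug}),
	})
	r.logger.Debug("replicator logger scoped")
}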
@@ -1361,8 +1361,11 @@ func (l *State) notifyIfAliased(serviceID structs.ServiceID) {
 // so we can safely log it without handling non-critical errors at the usage site.
 func (l *State) aclAccessorID(secretID string) string {
 	_, ident, err := l.Delegate.ResolveIdentityFromToken(secretID)
+	if acl.IsErrNotFound(err) {
+		return ""
+	}
 	if err != nil {
-		l.logger.Debug("error", err)
+		l.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
 		return ""
 	}
 	if ident == nil {
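Taken together, the aclAccessorID helpers follow one pattern: a not-found token is expected and returns "" silently, any other resolution failure is logged once at Debug inside the helper, and call sites can log the result unconditionally. A self-contained sketch of that pattern with stand-in types (errNotFound and resolve are inventions of this example):

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-hclog"
)

// errNotFound stands in for acl.ErrNotFound.
var errNotFound = errors.New("ACL not found")

type state struct {
	logger hclog.Logger
}

// resolve stands in for ResolveIdentityFromToken.
func (s *state) resolve(secretID string) (string, error) {
	switch secretID {
	case "expired":
		return "", errNotFound
	case "":
		return "", fmt.Errorf("rpc unavailable")
	default:
		return "accessor-" + secretID, nil
	}
}

// aclAccessorID mirrors the helpers in this commit: not-found stays
// silent, other errors are logged once here, and callers always get a
// string that is safe to include in a log line.
func (s *state) aclAccessorID(secretID string) string {
	id, err := s.resolve(secretID)
	if errors.Is(err, errNotFound) {
		return ""
	}
	if err != nil {
		s.logger.Debug("non-critical error resolving acl token accessor for logging", "error", err)
		return ""
	}
	return id
}

func main() {
	s := &state{logger: hclog.New(&hclog.LoggerOptions{Name: "agent", Level: hclog.Debug})}
	s.logger.Warn("request blocked by ACLs", "accessorID", s.aclAccessorID("abc123"))
}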