Backport of "Add extra logging for mesh health endpoints" into release/1.16.x (#18648)

backport of commit 9c7f4f960e59547a0429f8cf853098d2c070b7f1

Co-authored-by: Derek Menteer <derek.menteer@hashicorp.com>
hc-github-team-consul-core 2023-09-01 12:44:37 -05:00 committed by GitHub
parent 8fbef46443
commit 3318c83705
3 changed files with 20 additions and 1 deletion


@@ -106,6 +106,12 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service
 			// their data, rather than holding onto the last-known list of healthy nodes indefinitely.
 			if hadResults {
 				hadResults = false
+				h.deps.Logger.Debug("serverHealthBlocking emitting zero check-service-nodes due to insufficient ACL privileges",
+					"serviceName", structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta),
+					"correlationID", correlationID,
+					"connect", args.Connect,
+					"ingress", args.Ingress,
+				)
 				return 0, &structs.IndexedCheckServiceNodes{}, watch.ErrorACLResetData
 			}
 			return 0, nil, acl.ErrPermissionDenied
@@ -132,6 +138,13 @@ func (h *serverHealthBlocking) Notify(ctx context.Context, args *structs.Service
 			}
 			hadResults = true
+			h.deps.Logger.Trace("serverHealthBlocking emitting check-service-nodes",
+				"serviceName", structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta),
+				"correlationID", correlationID,
+				"connect", args.Connect,
+				"ingress", args.Ingress,
+				"nodes", len(thisReply.Nodes),
+			)
 			return thisReply.Index, &thisReply, nil
 		},
 		dispatchBlockingQueryUpdate[*structs.IndexedCheckServiceNodes](ch),
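
Everything added in this file (and the rest of the commit) follows hclog's structured-logging convention: a message string followed by alternating key/value pairs, emitted only when the logger's level admits Debug or Trace. As a minimal, self-contained sketch of that pattern, with an invented logger name and sample values rather than real Consul state:

package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Consul agents log at INFO by default, which suppresses Debug and
	// Trace output; this demo lowers the level so both lines appear.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "health", // invented subsystem name for illustration
		Level: hclog.Trace,
	})

	// Same shape as the new calls above: message first, then alternating
	// key/value pairs (all values below are samples, not Consul data).
	logger.Debug("serverHealthBlocking emitting zero check-service-nodes due to insufficient ACL privileges",
		"serviceName", "web",
		"correlationID", "upstream-target:web",
		"connect", true,
		"ingress", false,
	)
	logger.Trace("serverHealthBlocking emitting check-service-nodes",
		"serviceName", "web",
		"correlationID", "upstream-target:web",
		"connect", true,
		"ingress", false,
		"nodes", 3,
	)
}

hclog renders each pair list as key=value fields appended to the message on a single line, which is what makes fields like nodes and correlationID easy to grep for when debugging.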


@@ -136,6 +136,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 		uid := UpstreamIDFromString(uidString)
+		s.logger.Debug("upstream-target watch fired",
+			"correlationID", correlationID,
+			"nodes", len(resp.Nodes),
+		)
 		if _, ok := upstreamsSnapshot.WatchedUpstreamEndpoints[uid]; !ok {
 			upstreamsSnapshot.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
 		}
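
With debug logging enabled, the statement added above emits a single line in hclog's usual format. Roughly like the following, where the timestamp, subsystem prefix, and values are illustrative (the prefix depends on how s.logger was named):

2023-09-01T12:44:37.000-0500 [DEBUG] proxycfg: upstream-target watch fired: correlationID=upstream-target:web?dc=dc1 nodes=2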


@@ -723,6 +723,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		}
 		switch len(groupedTarget.Targets) {
 		case 0:
+			s.Logger.Trace("skipping endpoint generation for zero-length target group", "cluster", clusterName)
 			continue
 		case 1:
 			// We expect one target so this passes through to continue setting the load assignment up.
@@ -730,7 +731,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 			return nil, fmt.Errorf("cannot have more than one target")
 		}
 		ti := groupedTarget.Targets[0]
-		s.Logger.Debug("generating endpoints for", "cluster", clusterName, "targetID", ti.TargetID)
+		s.Logger.Trace("generating endpoints for", "cluster", clusterName, "targetID", ti.TargetID, "gatewayKey", gatewayKey)
 		targetUID := proxycfg.NewUpstreamIDFromTargetID(ti.TargetID)
 		if targetUID.Peer != "" {
 			loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, targetUID, mgwMode)
@@ -752,6 +753,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 			forMeshGateway,
 		)
 		if !valid {
+			s.Logger.Trace("skipping endpoint generation for invalid target group", "cluster", clusterName)
 			continue // skip the cluster if we're still populating the snapshot
 		}
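
Note that every statement this commit adds sits at debug or trace level, and Consul agents default to info. To actually see the new output when diagnosing mesh health issues, start the agent with -log-level=trace (or set log_level = "TRACE" in the agent configuration); consul monitor -log-level=trace can stream the same lines from an already-running agent without restarting it.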