Add trace logs to proxycfg state runner and xds srv
commit a0be7dcc1d (parent 0fb96afe31)
agent/proxycfg/state.go

@@ -583,6 +583,8 @@ func (s *state) initialConfigSnapshot() ConfigSnapshot {
 }
 
 func (s *state) run() {
+	logger := s.logger.Named(logging.ProxyConfig)
+
 	// Close the channel we return from Watch when we stop so consumers can stop
 	// watching and clean up their goroutines. It's important we do this here and
 	// not in Close since this routine sends on this chan and so might panic if it
@@ -603,10 +605,13 @@ func (s *state) run() {
 		case <-s.ctx.Done():
 			return
 		case u := <-s.ch:
+			logger.Trace("A blocking query returned; handling snapshot update",
+				"proxy-id", s.proxyID.String(),
+			)
+
 			if err := s.handleUpdate(u, &snap); err != nil {
-				s.logger.Error("watch error",
-					"id", u.CorrelationID,
-					"error", err,
+				logger.Error("Failed to handle update from watch",
+					"id", u.CorrelationID, "error", err,
 				)
 				continue
 			}
@@ -616,18 +621,24 @@ func (s *state) run() {
 			// etc on future updates.
 			snapCopy, err := snap.Clone()
 			if err != nil {
-				s.logger.Error("Failed to copy config snapshot for proxy",
-					"proxy", s.proxyID,
-					"error", err,
+				logger.Error("Failed to copy config snapshot for proxy",
+					"proxy-id", s.proxyID.String(), "error", err,
 				)
 				continue
 			}
 
 			select {
-			case s.snapCh <- *snapCopy:
-				// try to send
-			default:
+			case s.snapCh <- *snapCopy:
+				logger.Trace("Delivered new snapshot to proxy config watchers",
+					"proxy-id", s.proxyID.String(),
+				)
+
+			// avoid blocking if a snapshot is already buffered
+			default:
+				logger.Trace("Failed to deliver new snapshot to proxy config watchers",
+					"proxy-id", s.proxyID.String(),
+				)
 			}
 
 			// Allow the next change to trigger a send
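The `select`/`default` pair above is Go's non-blocking send idiom: if the watcher has not yet consumed the previously buffered snapshot, the runner logs the failed delivery and moves on instead of stalling its event loop. A self-contained sketch of the pattern; the `snapshot` type and buffer size are stand-ins, not Consul's:

```go
package main

import "fmt"

type snapshot struct{ version int }

func main() {
	// A buffered channel of size one: at most one undelivered snapshot.
	snapCh := make(chan snapshot, 1)

	for v := 1; v <= 3; v++ {
		select {
		case snapCh <- snapshot{version: v}:
			fmt.Println("delivered snapshot", v)
		default:
			// Already one snapshot buffered; skip this send and let a
			// later update trigger another attempt.
			fmt.Println("dropped snapshot", v)
		}
	}

	fmt.Println("watcher sees version", (<-snapCh).version) // 1
}
```

Dropping is safe here because each send is a full snapshot rather than a delta, so the next successful delivery carries the complete state.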
@@ -638,18 +649,25 @@ func (s *state) run() {
 			continue
 
 		case replyCh := <-s.reqCh:
+			logger.Trace("A proxy config snapshot was requested",
+				"proxy-id", s.proxyID.String(),
+			)
+
 			if !snap.Valid() {
 				// Not valid yet just respond with nil and move on to next task.
 				replyCh <- nil
+
+				logger.Trace("The proxy's config snapshot is not valid yet",
+					"proxy-id", s.proxyID.String(),
+				)
 				continue
 			}
 			// Make a deep copy of snap so we don't mutate any of the embedded structs
 			// etc on future updates.
 			snapCopy, err := snap.Clone()
 			if err != nil {
-				s.logger.Error("Failed to copy config snapshot for proxy",
-					"proxy", s.proxyID,
-					"error", err,
+				logger.Error("Failed to copy config snapshot for proxy",
+					"proxy-id", s.proxyID.String(), "error", err,
 				)
 				continue
 			}
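The `reqCh` case is a channel-based request/reply: a caller sends in the channel it wants the answer on, and the runner replies `nil` for as long as no valid snapshot exists. A minimal sketch with hypothetical types in place of Consul's:

```go
package main

import "fmt"

type snapshot struct{ valid bool }

func main() {
	// Callers pass the channel they want the reply on.
	reqCh := make(chan chan *snapshot)

	go func() {
		snap := snapshot{valid: false}
		for replyCh := range reqCh {
			if !snap.valid {
				replyCh <- nil // not ready yet; caller retries or watches
				continue
			}
			cp := snap // reply with a copy so later updates can't race
			replyCh <- &cp
		}
	}()

	replyCh := make(chan *snapshot, 1)
	reqCh <- replyCh
	fmt.Println("reply:", <-replyCh) // <nil> while the snapshot is invalid
}
```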
agent/xds/server.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/hashicorp/consul/logging"
 	"sync/atomic"
 	"time"
@@ -164,6 +165,8 @@ const (
 )
 
 func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest) error {
+	logger := s.Logger.Named(logging.XDS)
+
 	// xDS requires a unique nonce to correlate response/request pairs
 	var nonce uint64
@@ -324,6 +327,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
 			// state machine.
 			defer watchCancel()
 
+			logger.Trace("watching proxy, pending initial proxycfg snapshot",
+				"proxy-id", proxyID.String())
+
 			// Now wait for the config so we can check ACL
 			state = statePendingInitialConfig
 		case statePendingInitialConfig:
@@ -335,6 +341,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
 			// Got config, try to authenticate next.
 			state = stateRunning
 
+			logger.Trace("Got initial config snapshot",
+				"proxy-id", cfgSnap.ProxyID.String())
+
 			// Lets actually process the config we just got or we'll mis responding
 			fallthrough
 		case stateRunning:
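The `fallthrough` in this hunk is the interesting control flow: once the initial snapshot arrives, execution drops straight into the `stateRunning` case so that first config is processed immediately instead of waiting for the next event. A stripped-down sketch of that switch-driven state machine (the enclosing event loop is omitted):

```go
package main

import "fmt"

type streamState int

const (
	statePendingInitialConfig streamState = iota
	stateRunning
)

func main() {
	state := statePendingInitialConfig

	switch state {
	case statePendingInitialConfig:
		fmt.Println("got initial config snapshot")
		state = stateRunning
		// Without fallthrough we'd miss responding to the snapshot we
		// just received until the next event woke the loop.
		fallthrough
	case stateRunning:
		fmt.Println("invoking resource handlers")
	}
}
```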
@@ -346,6 +355,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
 			// timer is first started.
 			extendAuthTimer()
 
+			logger.Trace("Invoking all xDS resource handlers and sending new data if there is any",
+				"proxy-id", cfgSnap.ProxyID.String())
+
 			// See if any handlers need to have the current (possibly new) config
 			// sent. Note the order here is actually significant so we can't just
 			// range the map which has no determined order. It's important because:
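All of the added calls are trace-level, so at Consul's default `INFO` log level they print nothing and cost almost nothing; operators opt in by raising the level. A small sketch of the gating, again assuming go-hclog with illustrative names:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "consul.xds", // illustrative
		Level: hclog.Info,
	})

	// Filtered out at INFO: the call returns without emitting anything.
	logger.Trace("Got initial config snapshot", "proxy-id", "web-sidecar-proxy")

	// Raise the level and the same call is emitted.
	logger.SetLevel(hclog.Trace)
	logger.Trace("Got initial config snapshot", "proxy-id", "web-sidecar-proxy")
}
```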
logging/names.go

@@ -56,5 +56,6 @@ const (
 	UIMetricsProxy string = "ui_metrics_proxy"
 	WAN string = "wan"
 	Watch string = "watch"
+	XDS string = "xds"
 	Vault string = "vault"
 )