Add trace logs to proxycfg state runner and xds srv

freddygv 2021-02-02 12:26:38 -07:00
parent 0fb96afe31
commit a0be7dcc1d
3 changed files with 42 additions and 11 deletions

agent/proxycfg/state.go

@@ -583,6 +583,8 @@ func (s *state) initialConfigSnapshot() ConfigSnapshot {
 }
 
 func (s *state) run() {
+    logger := s.logger.Named(logging.ProxyConfig)
+
     // Close the channel we return from Watch when we stop so consumers can stop
     // watching and clean up their goroutines. It's important we do this here and
     // not in Close since this routine sends on this chan and so might panic if it
@@ -603,10 +605,13 @@ func (s *state) run() {
         case <-s.ctx.Done():
             return
         case u := <-s.ch:
+            logger.Trace("A blocking query returned; handling snapshot update",
+                "proxy-id", s.proxyID.String(),
+            )
+
             if err := s.handleUpdate(u, &snap); err != nil {
-                s.logger.Error("watch error",
-                    "id", u.CorrelationID,
-                    "error", err,
+                logger.Error("Failed to handle update from watch",
+                    "id", u.CorrelationID, "error", err,
                 )
                 continue
             }
@@ -616,18 +621,24 @@ func (s *state) run() {
             // etc on future updates.
             snapCopy, err := snap.Clone()
             if err != nil {
-                s.logger.Error("Failed to copy config snapshot for proxy",
-                    "proxy", s.proxyID,
-                    "error", err,
+                logger.Error("Failed to copy config snapshot for proxy",
+                    "proxy-id", s.proxyID.String(), "error", err,
                 )
                 continue
             }
 
             select {
+            // try to send
             case s.snapCh <- *snapCopy:
-                // try to send
+                logger.Trace("Delivered new snapshot to proxy config watchers",
+                    "proxy-id", s.proxyID.String(),
+                )
+
+            // avoid blocking if a snapshot is already buffered
             default:
-                // avoid blocking if a snapshot is already buffered
+                logger.Trace("Failed to deliver new snapshot to proxy config watchers",
+                    "proxy-id", s.proxyID.String(),
+                )
             }
 
             // Allow the next change to trigger a send
@@ -638,18 +649,25 @@ func (s *state) run() {
             continue
 
         case replyCh := <-s.reqCh:
+            logger.Trace("A proxy config snapshot was requested",
+                "proxy-id", s.proxyID.String(),
+            )
+
             if !snap.Valid() {
                 // Not valid yet just respond with nil and move on to next task.
                 replyCh <- nil
+
+                logger.Trace("The proxy's config snapshot is not valid yet",
+                    "proxy-id", s.proxyID.String(),
+                )
                 continue
             }
             // Make a deep copy of snap so we don't mutate any of the embedded structs
             // etc on future updates.
             snapCopy, err := snap.Clone()
             if err != nil {
-                s.logger.Error("Failed to copy config snapshot for proxy",
-                    "proxy", s.proxyID,
-                    "error", err,
+                logger.Error("Failed to copy config snapshot for proxy",
+                    "proxy-id", s.proxyID.String(), "error", err,
                 )
                 continue
             }
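
The two new Trace lines in the select above instrument the runner's non-blocking snapshot delivery. Here is a minimal, self-contained sketch of that pattern, assuming simplified types: Snapshot, deliver, and the logger setup are illustrative stand-ins, not Consul's actual API.

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

// Snapshot stands in for proxycfg.ConfigSnapshot; the real type carries
// the full set of watched resources for one proxy.
type Snapshot struct{ ProxyID string }

// deliver mirrors the select in run(): send if the buffer has room,
// otherwise fall through to default so the run loop never blocks on a
// slow consumer.
func deliver(logger hclog.Logger, snapCh chan Snapshot, snap Snapshot) {
	select {
	case snapCh <- snap:
		logger.Trace("Delivered new snapshot to proxy config watchers",
			"proxy-id", snap.ProxyID)
	default:
		// A snapshot is already buffered; skipping this send is safe
		// because a later change re-triggers delivery.
		logger.Trace("Failed to deliver new snapshot to proxy config watchers",
			"proxy-id", snap.ProxyID)
	}
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "proxycfg", Level: hclog.Trace})
	snapCh := make(chan Snapshot, 1)

	deliver(logger, snapCh, Snapshot{ProxyID: "web-sidecar-proxy"}) // delivered
	deliver(logger, snapCh, Snapshot{ProxyID: "web-sidecar-proxy"}) // buffer full, dropped
}
```

Because the channel has capacity one, the second call lands in the default branch, which is exactly the case the new "Failed to deliver new snapshot" trace line makes visible.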

agent/xds/server.go

@@ -4,6 +4,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "github.com/hashicorp/consul/logging"
     "sync/atomic"
     "time"
 
@@ -164,6 +165,8 @@ const (
 )
 
 func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest) error {
+    logger := s.Logger.Named(logging.XDS)
+
     // xDS requires a unique nonce to correlate response/request pairs
     var nonce uint64
 
@@ -324,6 +327,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
             // state machine.
             defer watchCancel()
 
+            logger.Trace("watching proxy, pending initial proxycfg snapshot",
+                "proxy-id", proxyID.String())
+
             // Now wait for the config so we can check ACL
             state = statePendingInitialConfig
         case statePendingInitialConfig:
case statePendingInitialConfig: case statePendingInitialConfig:
@ -335,6 +341,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
// Got config, try to authenticate next. // Got config, try to authenticate next.
state = stateRunning state = stateRunning
logger.Trace("Got initial config snapshot",
"proxy-id", cfgSnap.ProxyID.String())
// Lets actually process the config we just got or we'll mis responding // Lets actually process the config we just got or we'll mis responding
fallthrough fallthrough
case stateRunning: case stateRunning:
@ -346,6 +355,9 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
// timer is first started. // timer is first started.
extendAuthTimer() extendAuthTimer()
logger.Trace("Invoking all xDS resource handlers and sending new data if there is any",
"proxy-id", cfgSnap.ProxyID.String())
// See if any handlers need to have the current (possibly new) config // See if any handlers need to have the current (possibly new) config
// sent. Note the order here is actually significant so we can't just // sent. Note the order here is actually significant so we can't just
// range the map which has no determined order. It's important because: // range the map which has no determined order. It's important because:
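
The three trace points above mark transitions of the per-stream state machine inside process(). Below is a compressed sketch of that flow, assuming a single event channel in place of the real discovery-request/snapshot multiplexing; only the state names and log messages come from the diff, everything else is illustrative.

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

type streamState int

const (
	stateInit streamState = iota
	statePendingInitialConfig
	stateRunning
)

type configSnapshot struct{ ProxyID string }

// process walks one xDS stream through its states, logging at each
// transition the way the diff above does.
func process(logger hclog.Logger, events <-chan configSnapshot) {
	state := stateInit
	var cfgSnap configSnapshot
	for snap := range events {
		switch state {
		case stateInit:
			// In the real server this transition happens on the first
			// discovery request, which also registers the proxycfg watch.
			logger.Trace("watching proxy, pending initial proxycfg snapshot",
				"proxy-id", snap.ProxyID)
			state = statePendingInitialConfig
		case statePendingInitialConfig:
			cfgSnap = snap
			state = stateRunning
			logger.Trace("Got initial config snapshot",
				"proxy-id", cfgSnap.ProxyID)
			// Process the snapshot we just received rather than waiting
			// for the next event.
			fallthrough
		case stateRunning:
			logger.Trace("Invoking all xDS resource handlers and sending new data if there is any",
				"proxy-id", cfgSnap.ProxyID)
			// ... invoke the resource handlers in their fixed order ...
		}
	}
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "xds", Level: hclog.Trace})
	events := make(chan configSnapshot, 3)
	for i := 0; i < 3; i++ {
		events <- configSnapshot{ProxyID: "web-sidecar-proxy"}
	}
	close(events)
	process(logger, events)
}
```

The fallthrough mirrors the real code: once the initial snapshot arrives, it is processed immediately instead of waiting for the next event.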

logging/names.go

@@ -56,5 +56,6 @@ const (
     UIMetricsProxy string = "ui_metrics_proxy"
     WAN string = "wan"
     Watch string = "watch"
+    XDS string = "xds"
     Vault string = "vault"
 )
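
The new XDS constant gives the xDS server a stable subsystem name for its sub-logger, matching how the proxycfg runner uses logging.ProxyConfig. A minimal sketch of that pattern with hclog; the constant values and root-logger options here are local stand-ins, not Consul's actual wiring.

```go
package main

import (
	"github.com/hashicorp/go-hclog"
)

// Local stand-ins for the constants in Consul's logging package.
const (
	ProxyConfig = "proxycfg"
	XDS         = "xds"
)

func main() {
	// Trace level must be enabled on the root logger, or the Trace calls
	// added in this commit are silently dropped.
	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Trace})

	// Named returns a sub-logger that prefixes its subsystem name, so
	// lines come out roughly as: [TRACE] consul.xds: watching proxy ...
	xds := root.Named(XDS)
	xds.Trace("watching proxy, pending initial proxycfg snapshot",
		"proxy-id", "web-sidecar-proxy")

	pcfg := root.Named(ProxyConfig)
	pcfg.Trace("A proxy config snapshot was requested",
		"proxy-id", "web-sidecar-proxy")
}
```

At runtime these lines only appear when the agent's log level is set to trace, e.g. log_level = "TRACE" in the agent configuration.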