Allow users to configure either unstructured or JSON logging (#7130)
* hclog: Allow users to choose between unstructured and JSON logging
commit 3dd0b59793
parent 3044b4bf89
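As background for the change, here is a minimal sketch of the two output modes the new hclog-based logging supports (the option names are go-hclog's standard ones; the Consul configuration flag that selects JSON output is not shown in this excerpt, and the logger name and message are illustrative):

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Unstructured, human-readable output.
	textLogger := hclog.New(&hclog.LoggerOptions{
		Name:   "agent",
		Level:  hclog.Debug,
		Output: os.Stderr,
	})
	textLogger.Info("Started HTTP server", "address", "127.0.0.1:8500", "network", "tcp")

	// The same call sites emit structured JSON when JSONFormat is set.
	jsonLogger := hclog.New(&hclog.LoggerOptions{
		Name:       "agent",
		Level:      hclog.Debug,
		Output:     os.Stderr,
		JSONFormat: true,
	})
	jsonLogger.Info("Started HTTP server", "address", "127.0.0.1:8500", "network", "tcp")
}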
@@ -62,7 +62,7 @@ func (a *Agent) resolveIdentityFromToken(secretID string) (bool, structs.ACLIden
 func (a *Agent) aclAccessorID(secretID string) string {
 _, ident, err := a.resolveIdentityFromToken(secretID)
 if err != nil {
-a.logger.Printf("[DEBUG] agent.acl: %v", err)
+a.logger.Debug("error", err)
 return ""
 }
 if ident == nil {

@@ -281,7 +281,7 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
 continue
 }
 accessorID := a.aclAccessorID(token)
-a.logger.Printf("[DEBUG] agent: dropping node from result due to ACLs, node=%q accessorID=%q", node, accessorID)
+a.logger.Debug("dropping node from result due to ACLs", "node", node, "accessorID", accessorID)
 m = append(m[:i], m[i+1:]...)
 i--
 }

@@ -311,7 +311,7 @@ func (a *Agent) filterServicesWithAuthorizer(authz acl.Authorizer, services *map
 if authz.ServiceRead(service.Service, &authzContext) == acl.Allow {
 continue
 }
-a.logger.Printf("[DEBUG] agent: dropping service from result due to ACLs, service=%q", id.String())
+a.logger.Debug("dropping service from result due to ACLs", "service", id.String())
 delete(*services, id)
 }
 return nil

@@ -347,7 +347,7 @@ func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[str
 continue
 }
 }
-a.logger.Printf("[DEBUG] agent: dropping check from result due to ACLs, check=%q", id.String())
+a.logger.Debug("dropping check from result due to ACLs", "check", id.String())
 delete(*checks, id)
 }
 return nil
@@ -3,7 +3,6 @@ package agent
 import (
 "fmt"
 "io"
-"log"
 "testing"
 "time"
 

@@ -14,9 +13,9 @@ import (
 "github.com/hashicorp/consul/agent/local"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/lib"
-"github.com/hashicorp/consul/logger"
 "github.com/hashicorp/consul/sdk/testutil"
 "github.com/hashicorp/consul/types"
+"github.com/hashicorp/go-hclog"
 "github.com/hashicorp/serf/serf"
 
 "github.com/stretchr/testify/require"

@@ -39,9 +38,6 @@ type TestACLAgent struct {
 // to os.Stderr.
 LogOutput io.Writer
 
-// LogWriter is used for streaming logs.
-LogWriter *logger.LogWriter
-
 // DataDir is the data directory which is used when Config.DataDir
 // is not set. It is created automatically and removed when
 // Shutdown() is called.

@@ -60,7 +56,11 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveFn func(strin
 hclDataDir := `data_dir = "acl-agent"`
 
 logOutput := testutil.TestWriter(t)
-logger := log.New(logOutput, a.Name+" - ", log.LstdFlags|log.Lmicroseconds)
+logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
+Name: a.Name,
+Level: hclog.Debug,
+Output: logOutput,
+})
 
 a.Config = TestConfig(logger,
 config.Source{Name: a.Name, Format: "hcl", Data: a.HCL},

@@ -74,7 +74,6 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveFn func(strin
 a.Agent = agent
 
 agent.LogOutput = logOutput
-agent.LogWriter = a.LogWriter
 agent.logger = logger
 agent.MemSink = metrics.NewInmemSink(1*time.Second, time.Minute)
 
@@ -3,12 +3,12 @@ package ae
 
 import (
 "fmt"
-"log"
+"github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/logging"
+"github.com/hashicorp/go-hclog"
 "math"
 "sync"
 "time"
-
-"github.com/hashicorp/consul/lib"
 )
 
 // scaleThreshold is the number of nodes after which regular sync runs are

@@ -60,7 +60,7 @@ type StateSyncer struct {
 ShutdownCh chan struct{}
 
 // Logger is the logger.
-Logger *log.Logger
+Logger hclog.Logger
 
 // ClusterSize returns the number of members in the cluster to
 // allow staggering the sync runs based on cluster size.

@@ -107,12 +107,16 @@ const (
 retryFailIntv = 15 * time.Second
 )
 
-func NewStateSyncer(state SyncState, intv time.Duration, shutdownCh chan struct{}, logger *log.Logger) *StateSyncer {
+func NewStateSyncer(state SyncState, intv time.Duration, shutdownCh chan struct{}, logger hclog.Logger) *StateSyncer {
+if logger == nil {
+logger = hclog.New(&hclog.LoggerOptions{})
+}
+
 s := &StateSyncer{
 State: state,
 Interval: intv,
 ShutdownCh: shutdownCh,
-Logger: logger,
+Logger: logger.Named(logging.AntiEntropy),
 SyncFull: NewTrigger(),
 SyncChanges: NewTrigger(),
 serverUpInterval: serverUpIntv,

@@ -166,7 +170,7 @@ func (s *StateSyncer) nextFSMState(fs fsmState) fsmState {
 
 err := s.State.SyncFull()
 if err != nil {
-s.Logger.Printf("[ERR] agent: failed to sync remote state: %v", err)
+s.Logger.Error("failed to sync remote state", "error", err)
 return retryFullSyncState
 }
 

@@ -196,7 +200,7 @@ func (s *StateSyncer) nextFSMState(fs fsmState) fsmState {
 
 err := s.State.SyncChanges()
 if err != nil {
-s.Logger.Printf("[ERR] agent: failed to sync changes: %v", err)
+s.Logger.Error("failed to sync changes", "error", err)
 }
 return partialSyncState
 
@@ -3,14 +3,14 @@ package ae
 import (
 "errors"
 "fmt"
-"log"
-"os"
 "reflect"
 "sync"
 "testing"
 "time"
 
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/sdk/testutil"
+"github.com/hashicorp/go-hclog"
 "github.com/stretchr/testify/assert"
 )
 

@@ -86,7 +86,7 @@ func TestAE_staggerDependsOnClusterSize(t *testing.T) {
 libRandomStagger = func(d time.Duration) time.Duration { return d }
 defer func() { libRandomStagger = lib.RandomStagger }()
 
-l := testSyncer()
+l := testSyncer(t)
 if got, want := l.staggerFn(10*time.Millisecond), 10*time.Millisecond; got != want {
 t.Fatalf("got %v want %v", got, want)
 }

@@ -106,7 +106,7 @@ func TestAE_Run_SyncFullBeforeChanges(t *testing.T) {
 }
 
 // indicate that we have partial changes before starting Run
-l := testSyncer()
+l := testSyncer(t)
 l.State = state
 l.ShutdownCh = shutdownCh
 l.SyncChanges.Trigger()

@@ -132,7 +132,7 @@ func TestAE_Run_Quit(t *testing.T) {
 t.Fatal("Run should panic")
 }
 }()
-l := testSyncer()
+l := testSyncer(t)
 l.ClusterSize = nil
 l.Run()
 })

@@ -140,7 +140,7 @@ func TestAE_Run_Quit(t *testing.T) {
 // start timer which explodes if runFSM does not quit
 tm := time.AfterFunc(time.Second, func() { panic("timeout") })
 
-l := testSyncer()
+l := testSyncer(t)
 l.runFSM(fullSyncState, func(fsmState) fsmState { return doneState })
 // should just quit
 tm.Stop()

@@ -150,7 +150,7 @@ func TestAE_Run_Quit(t *testing.T) {
 func TestAE_FSM(t *testing.T) {
 t.Run("fullSyncState", func(t *testing.T) {
 t.Run("Paused -> retryFullSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.Pause()
 fs := l.nextFSMState(fullSyncState)
 if got, want := fs, retryFullSyncState; got != want {

@@ -158,7 +158,7 @@ func TestAE_FSM(t *testing.T) {
 }
 })
 t.Run("SyncFull() error -> retryFullSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.State = &mock{syncFull: func() error { return errors.New("boom") }}
 fs := l.nextFSMState(fullSyncState)
 if got, want := fs, retryFullSyncState; got != want {

@@ -166,7 +166,7 @@ func TestAE_FSM(t *testing.T) {
 }
 })
 t.Run("SyncFull() OK -> partialSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.State = &mock{}
 fs := l.nextFSMState(fullSyncState)
 if got, want := fs, partialSyncState; got != want {

@@ -178,7 +178,7 @@ func TestAE_FSM(t *testing.T) {
 t.Run("retryFullSyncState", func(t *testing.T) {
 // helper for testing state transitions from retrySyncFullState
 test := func(ev event, to fsmState) {
-l := testSyncer()
+l := testSyncer(t)
 l.retrySyncFullEvent = func() event { return ev }
 fs := l.nextFSMState(retryFullSyncState)
 if got, want := fs, to; got != want {

@@ -208,7 +208,7 @@ func TestAE_FSM(t *testing.T) {
 t.Run("partialSyncState", func(t *testing.T) {
 // helper for testing state transitions from partialSyncState
 test := func(ev event, to fsmState) {
-l := testSyncer()
+l := testSyncer(t)
 l.syncChangesEvent = func() event { return ev }
 fs := l.nextFSMState(partialSyncState)
 if got, want := fs, to; got != want {

@@ -225,7 +225,7 @@ func TestAE_FSM(t *testing.T) {
 test(syncFullTimerEvent, fullSyncState)
 })
 t.Run("syncChangesEvent+Paused -> partialSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.Pause()
 l.syncChangesEvent = func() event { return syncChangesNotifEvent }
 fs := l.nextFSMState(partialSyncState)

@@ -234,7 +234,7 @@ func TestAE_FSM(t *testing.T) {
 }
 })
 t.Run("syncChangesEvent+SyncChanges() error -> partialSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.State = &mock{syncChanges: func() error { return errors.New("boom") }}
 l.syncChangesEvent = func() event { return syncChangesNotifEvent }
 fs := l.nextFSMState(partialSyncState)

@@ -243,7 +243,7 @@ func TestAE_FSM(t *testing.T) {
 }
 })
 t.Run("syncChangesEvent+SyncChanges() OK -> partialSyncState", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.State = &mock{}
 l.syncChangesEvent = func() event { return syncChangesNotifEvent }
 fs := l.nextFSMState(partialSyncState)

@@ -268,14 +268,14 @@ func TestAE_FSM(t *testing.T) {
 t.Fatal("invalid state should panic")
 }
 }()
-l := testSyncer()
+l := testSyncer(t)
 l.nextFSMState(fsmState("invalid"))
 })
 }
 
 func TestAE_RetrySyncFullEvent(t *testing.T) {
 t.Run("trigger shutdownEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.ShutdownCh = make(chan struct{})
 evch := make(chan event)
 go func() { evch <- l.retrySyncFullEvent() }()

@@ -285,7 +285,7 @@ func TestAE_RetrySyncFullEvent(t *testing.T) {
 }
 })
 t.Run("trigger shutdownEvent during FullNotif", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.ShutdownCh = make(chan struct{})
 evch := make(chan event)
 go func() { evch <- l.retrySyncFullEvent() }()

@@ -297,7 +297,7 @@ func TestAE_RetrySyncFullEvent(t *testing.T) {
 }
 })
 t.Run("trigger syncFullNotifEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.serverUpInterval = 10 * time.Millisecond
 evch := make(chan event)
 go func() { evch <- l.retrySyncFullEvent() }()

@@ -307,7 +307,7 @@ func TestAE_RetrySyncFullEvent(t *testing.T) {
 }
 })
 t.Run("trigger syncFullTimerEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.retryFailInterval = 10 * time.Millisecond
 evch := make(chan event)
 go func() { evch <- l.retrySyncFullEvent() }()

@@ -319,7 +319,7 @@ func TestAE_RetrySyncFullEvent(t *testing.T) {
 
 func TestAE_SyncChangesEvent(t *testing.T) {
 t.Run("trigger shutdownEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.ShutdownCh = make(chan struct{})
 evch := make(chan event)
 go func() { evch <- l.syncChangesEvent() }()

@@ -329,7 +329,7 @@ func TestAE_SyncChangesEvent(t *testing.T) {
 }
 })
 t.Run("trigger shutdownEvent during FullNotif", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.ShutdownCh = make(chan struct{})
 evch := make(chan event)
 go func() { evch <- l.syncChangesEvent() }()

@@ -341,7 +341,7 @@ func TestAE_SyncChangesEvent(t *testing.T) {
 }
 })
 t.Run("trigger syncFullNotifEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.serverUpInterval = 10 * time.Millisecond
 evch := make(chan event)
 go func() { evch <- l.syncChangesEvent() }()

@@ -351,7 +351,7 @@ func TestAE_SyncChangesEvent(t *testing.T) {
 }
 })
 t.Run("trigger syncFullTimerEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 l.Interval = 10 * time.Millisecond
 evch := make(chan event)
 go func() { evch <- l.syncChangesEvent() }()

@@ -360,7 +360,7 @@ func TestAE_SyncChangesEvent(t *testing.T) {
 }
 })
 t.Run("trigger syncChangesNotifEvent", func(t *testing.T) {
-l := testSyncer()
+l := testSyncer(t)
 evch := make(chan event)
 go func() { evch <- l.syncChangesEvent() }()
 l.SyncChanges.Trigger()

@@ -391,8 +391,12 @@ func (m *mock) SyncChanges() error {
 return nil
 }
 
-func testSyncer() *StateSyncer {
-logger := log.New(os.Stderr, "", 0)
+func testSyncer(t *testing.T) *StateSyncer {
+logger := hclog.New(&hclog.LoggerOptions{
+Level: 0,
+Output: testutil.TestWriter(t),
+})
 
 l := NewStateSyncer(nil, time.Second, nil, logger)
 l.stagger = func(d time.Duration) time.Duration { return d }
 l.ClusterSize = func() int { return 1 }
agent/agent.go (383 changes)
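The agent/agent.go hunks below change Agent.logger from *log.Logger to hclog.InterceptLogger. An InterceptLogger is an hclog.Logger that can additionally have sinks registered and removed at runtime, which is what allows an agent to mirror its log stream elsewhere. A minimal sketch, assuming go-hclog's standard API (the sink shown is purely illustrative and not taken from this diff):

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:   "agent",
		Level:  hclog.Info,
		Output: os.Stderr,
	})

	// A sink receives copies of log lines with its own level and output;
	// here it simply duplicates output to stdout for illustration.
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Level:  hclog.Debug,
		Output: os.Stdout,
	})
	logger.RegisterSink(sink)
	defer logger.DeregisterSink(sink)

	logger.Info("Requesting shutdown")
}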
@ -8,7 +8,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
@ -19,6 +18,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
"github.com/hashicorp/go-memdb"
|
"github.com/hashicorp/go-memdb"
|
||||||
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
@ -42,7 +42,7 @@ import (
|
||||||
"github.com/hashicorp/consul/ipaddr"
|
"github.com/hashicorp/consul/ipaddr"
|
||||||
"github.com/hashicorp/consul/lib"
|
"github.com/hashicorp/consul/lib"
|
||||||
"github.com/hashicorp/consul/lib/file"
|
"github.com/hashicorp/consul/lib/file"
|
||||||
"github.com/hashicorp/consul/logger"
|
"github.com/hashicorp/consul/logging"
|
||||||
"github.com/hashicorp/consul/tlsutil"
|
"github.com/hashicorp/consul/tlsutil"
|
||||||
"github.com/hashicorp/consul/types"
|
"github.com/hashicorp/consul/types"
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
|
@ -164,14 +164,11 @@ type Agent struct {
|
||||||
config *config.RuntimeConfig
|
config *config.RuntimeConfig
|
||||||
|
|
||||||
// Used for writing our logs
|
// Used for writing our logs
|
||||||
logger *log.Logger
|
logger hclog.InterceptLogger
|
||||||
|
|
||||||
// Output sink for logs
|
// Output sink for logs
|
||||||
LogOutput io.Writer
|
LogOutput io.Writer
|
||||||
|
|
||||||
// Used for streaming logs to
|
|
||||||
LogWriter *logger.LogWriter
|
|
||||||
|
|
||||||
// In-memory sink used for collecting metrics
|
// In-memory sink used for collecting metrics
|
||||||
MemSink *metrics.InmemSink
|
MemSink *metrics.InmemSink
|
||||||
|
|
||||||
|
@ -312,7 +309,7 @@ type Agent struct {
|
||||||
|
|
||||||
// New verifies the configuration given has a Datacenter and DataDir
|
// New verifies the configuration given has a Datacenter and DataDir
|
||||||
// configured, and maps the remaining config fields to fields on the Agent.
|
// configured, and maps the remaining config fields to fields on the Agent.
|
||||||
func New(c *config.RuntimeConfig, logger *log.Logger) (*Agent, error) {
|
func New(c *config.RuntimeConfig, logger hclog.InterceptLogger) (*Agent, error) {
|
||||||
if c.Datacenter == "" {
|
if c.Datacenter == "" {
|
||||||
return nil, fmt.Errorf("Must configure a Datacenter")
|
return nil, fmt.Errorf("Must configure a Datacenter")
|
||||||
}
|
}
|
||||||
|
@ -382,13 +379,17 @@ func (a *Agent) Start() error {
|
||||||
|
|
||||||
// Warn if the node name is incompatible with DNS
|
// Warn if the node name is incompatible with DNS
|
||||||
if InvalidDnsRe.MatchString(a.config.NodeName) {
|
if InvalidDnsRe.MatchString(a.config.NodeName) {
|
||||||
a.logger.Printf("[WARN] agent: Node name %q will not be discoverable "+
|
a.logger.Warn("Node name will not be discoverable "+
|
||||||
"via DNS due to invalid characters. Valid characters include "+
|
"via DNS due to invalid characters. Valid characters include "+
|
||||||
"all alpha-numerics and dashes.", a.config.NodeName)
|
"all alpha-numerics and dashes.",
|
||||||
|
"node_name", a.config.NodeName,
|
||||||
|
)
|
||||||
} else if len(a.config.NodeName) > MaxDNSLabelLength {
|
} else if len(a.config.NodeName) > MaxDNSLabelLength {
|
||||||
a.logger.Printf("[WARN] agent: Node name %q will not be discoverable "+
|
a.logger.Warn("Node name will not be discoverable "+
|
||||||
"via DNS due to it being too long. Valid lengths are between "+
|
"via DNS due to it being too long. Valid lengths are between "+
|
||||||
"1 and 63 bytes.", a.config.NodeName)
|
"1 and 63 bytes.",
|
||||||
|
"node_name", a.config.NodeName,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// load the tokens - this requires the logger to be setup
|
// load the tokens - this requires the logger to be setup
|
||||||
|
@ -465,7 +466,7 @@ func (a *Agent) Start() error {
|
||||||
if err = a.setupClientAutoEncryptWatching(rootsReq, leafReq); err != nil {
|
if err = a.setupClientAutoEncryptWatching(rootsReq, leafReq); err != nil {
|
||||||
return fmt.Errorf("AutoEncrypt failed: %s", err)
|
return fmt.Errorf("AutoEncrypt failed: %s", err)
|
||||||
}
|
}
|
||||||
a.logger.Printf("[INFO] AutoEncrypt: upgraded to TLS")
|
a.logger.Info("automatically upgraded to TLS")
|
||||||
}
|
}
|
||||||
|
|
||||||
a.serviceManager.Start()
|
a.serviceManager.Start()
|
||||||
|
@ -484,7 +485,7 @@ func (a *Agent) Start() error {
|
||||||
// Start the proxy config manager.
|
// Start the proxy config manager.
|
||||||
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
|
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
|
||||||
Cache: a.cache,
|
Cache: a.cache,
|
||||||
Logger: a.logger,
|
Logger: a.logger.Named(logging.ProxyConfig),
|
||||||
State: a.State,
|
State: a.State,
|
||||||
Source: &structs.QuerySource{
|
Source: &structs.QuerySource{
|
||||||
Node: a.config.NodeName,
|
Node: a.config.NodeName,
|
||||||
|
@ -497,7 +498,7 @@ func (a *Agent) Start() error {
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
if err := a.proxyConfig.Run(); err != nil {
|
if err := a.proxyConfig.Run(); err != nil {
|
||||||
a.logger.Printf("[ERR] Proxy Config Manager exited: %s", err)
|
a.logger.Error("proxy config manager exited with error", "error", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
@ -643,7 +644,10 @@ func (a *Agent) setupClientAutoEncryptWatching(rootsReq *structs.DCSpecificReque
|
||||||
roots, ok := u.Result.(*structs.IndexedCARoots)
|
roots, ok := u.Result.(*structs.IndexedCARoots)
|
||||||
if !ok {
|
if !ok {
|
||||||
err := fmt.Errorf("invalid type for roots response: %T", u.Result)
|
err := fmt.Errorf("invalid type for roots response: %T", u.Result)
|
||||||
a.logger.Printf("[ERR] %s watch error: %s", u.CorrelationID, err)
|
a.logger.Error("watch error for correlation id",
|
||||||
|
"correlation_id", u.CorrelationID,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
pems := []string{}
|
pems := []string{}
|
||||||
|
@ -655,7 +659,10 @@ func (a *Agent) setupClientAutoEncryptWatching(rootsReq *structs.DCSpecificReque
|
||||||
leaf, ok := u.Result.(*structs.IssuedCert)
|
leaf, ok := u.Result.(*structs.IssuedCert)
|
||||||
if !ok {
|
if !ok {
|
||||||
err := fmt.Errorf("invalid type for leaf response: %T", u.Result)
|
err := fmt.Errorf("invalid type for leaf response: %T", u.Result)
|
||||||
a.logger.Printf("[ERR] %s watch error: %s", u.CorrelationID, err)
|
a.logger.Error("watch error for correlation id",
|
||||||
|
"correlation_id", u.CorrelationID,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
a.tlsConfigurator.UpdateAutoEncryptCert(leaf.CertPEM, leaf.PrivateKeyPEM)
|
a.tlsConfigurator.UpdateAutoEncryptCert(leaf.CertPEM, leaf.PrivateKeyPEM)
|
||||||
|
@ -678,25 +685,26 @@ func (a *Agent) setupClientAutoEncryptWatching(rootsReq *structs.DCSpecificReque
|
||||||
// renew, but this case shouldn't happen because at
|
// renew, but this case shouldn't happen because at
|
||||||
// this point, auto_encrypt was just being setup
|
// this point, auto_encrypt was just being setup
|
||||||
// successfully.
|
// successfully.
|
||||||
|
autoLogger := a.logger.Named(logging.AutoEncrypt)
|
||||||
interval := a.tlsConfigurator.AutoEncryptCertNotAfter().Sub(time.Now().Add(10 * time.Second))
|
interval := a.tlsConfigurator.AutoEncryptCertNotAfter().Sub(time.Now().Add(10 * time.Second))
|
||||||
a.logger.Printf("[DEBUG] AutoEncrypt: client certificate expiration check in %s", interval)
|
a.logger.Debug("setting up client certificate expiration check on interval", "interval", interval)
|
||||||
select {
|
select {
|
||||||
case <-a.shutdownCh:
|
case <-a.shutdownCh:
|
||||||
return
|
return
|
||||||
case <-time.After(interval):
|
case <-time.After(interval):
|
||||||
// check auto encrypt client cert expiration
|
// check auto encrypt client cert expiration
|
||||||
if a.tlsConfigurator.AutoEncryptCertExpired() {
|
if a.tlsConfigurator.AutoEncryptCertExpired() {
|
||||||
a.logger.Printf("[DEBUG] AutoEncrypt: client certificate expired.")
|
autoLogger.Debug("client certificate expired.")
|
||||||
reply, err := a.setupClientAutoEncrypt()
|
reply, err := a.setupClientAutoEncrypt()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] AutoEncrypt: client certificate expired, failed to renew: %s", err)
|
autoLogger.Error("client certificate expired, failed to renew", "error", err)
|
||||||
// in case of an error, try again in one minute
|
// in case of an error, try again in one minute
|
||||||
interval = time.Minute
|
interval = time.Minute
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
_, _, err = a.setupClientAutoEncryptCache(reply)
|
_, _, err = a.setupClientAutoEncryptCache(reply)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] AutoEncrypt: client certificate expired, failed to populate cache: %s", err)
|
autoLogger.Error("client certificate expired, failed to populate cache", "error", err)
|
||||||
// in case of an error, try again in one minute
|
// in case of an error, try again in one minute
|
||||||
interval = time.Minute
|
interval = time.Minute
|
||||||
continue
|
continue
|
||||||
|
@ -743,11 +751,13 @@ func (a *Agent) listenAndServeGRPC() error {
|
||||||
|
|
||||||
for _, l := range ln {
|
for _, l := range ln {
|
||||||
go func(innerL net.Listener) {
|
go func(innerL net.Listener) {
|
||||||
a.logger.Printf("[INFO] agent: Started gRPC server on %s (%s)",
|
a.logger.Info("Started gRPC server",
|
||||||
innerL.Addr().String(), innerL.Addr().Network())
|
"address", innerL.Addr().String(),
|
||||||
|
"network", innerL.Addr().Network(),
|
||||||
|
)
|
||||||
err := a.grpcServer.Serve(innerL)
|
err := a.grpcServer.Serve(innerL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] gRPC server failed: %s", err)
|
a.logger.Error("gRPC server failed", "error", err)
|
||||||
}
|
}
|
||||||
}(l)
|
}(l)
|
||||||
}
|
}
|
||||||
|
@ -782,7 +792,10 @@ func (a *Agent) listenAndServeDNS() error {
|
||||||
for range a.config.DNSAddrs {
|
for range a.config.DNSAddrs {
|
||||||
select {
|
select {
|
||||||
case addr := <-notif:
|
case addr := <-notif:
|
||||||
a.logger.Printf("[INFO] agent: Started DNS server %s (%s)", addr.String(), addr.Network())
|
a.logger.Info("Started DNS server",
|
||||||
|
"address", addr.String(),
|
||||||
|
"network", addr.Network(),
|
||||||
|
)
|
||||||
|
|
||||||
case err := <-errCh:
|
case err := <-errCh:
|
||||||
merr = multierror.Append(merr, err)
|
merr = multierror.Append(merr, err)
|
||||||
|
@ -913,7 +926,7 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
|
||||||
|
|
||||||
func (a *Agent) listenSocket(path string) (net.Listener, error) {
|
func (a *Agent) listenSocket(path string) (net.Listener, error) {
|
||||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||||
a.logger.Printf("[WARN] agent: Replacing socket %q", path)
|
a.logger.Warn("Replacing socket", "path", path)
|
||||||
}
|
}
|
||||||
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||||
return nil, fmt.Errorf("error removing socket file: %s", err)
|
return nil, fmt.Errorf("error removing socket file: %s", err)
|
||||||
|
@ -943,16 +956,22 @@ func (a *Agent) serveHTTP(srv *HTTPServer) error {
|
||||||
notif <- srv.ln.Addr()
|
notif <- srv.ln.Addr()
|
||||||
err := srv.Serve(srv.ln)
|
err := srv.Serve(srv.ln)
|
||||||
if err != nil && err != http.ErrServerClosed {
|
if err != nil && err != http.ErrServerClosed {
|
||||||
a.logger.Print(err)
|
a.logger.Error("error closing server", "error", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case addr := <-notif:
|
case addr := <-notif:
|
||||||
if srv.proto == "https" {
|
if srv.proto == "https" {
|
||||||
a.logger.Printf("[INFO] agent: Started HTTPS server on %s (%s)", addr.String(), addr.Network())
|
a.logger.Info("Started HTTPS server",
|
||||||
|
"address", addr.String(),
|
||||||
|
"network", addr.Network(),
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
a.logger.Printf("[INFO] agent: Started HTTP server on %s (%s)", addr.String(), addr.Network())
|
a.logger.Info("Started HTTP server",
|
||||||
|
"address", addr.String(),
|
||||||
|
"network", addr.Network(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
case <-time.After(time.Second):
|
case <-time.After(time.Second):
|
||||||
|
@ -1008,7 +1027,7 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
|
||||||
handler, hasHandler := wp.Exempt["handler"]
|
handler, hasHandler := wp.Exempt["handler"]
|
||||||
args, hasArgs := wp.Exempt["args"]
|
args, hasArgs := wp.Exempt["args"]
|
||||||
if hasHandler {
|
if hasHandler {
|
||||||
a.logger.Printf("[WARN] agent: The 'handler' field in watches has been deprecated " +
|
a.logger.Warn("The 'handler' field in watches has been deprecated " +
|
||||||
"and replaced with the 'args' field. See https://www.consul.io/docs/agent/watches.html")
|
"and replaced with the 'args' field. See https://www.consul.io/docs/agent/watches.html")
|
||||||
}
|
}
|
||||||
if _, ok := handler.(string); hasHandler && !ok {
|
if _, ok := handler.(string); hasHandler && !ok {
|
||||||
|
@ -1043,19 +1062,19 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
|
||||||
for _, wp := range watchPlans {
|
for _, wp := range watchPlans {
|
||||||
config, err := a.config.APIConfig(true)
|
config, err := a.config.APIConfig(true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] agent: Failed to run watch: %v", err)
|
a.logger.Error("Failed to run watch", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
a.watchPlans = append(a.watchPlans, wp)
|
a.watchPlans = append(a.watchPlans, wp)
|
||||||
go func(wp *watch.Plan) {
|
go func(wp *watch.Plan) {
|
||||||
if h, ok := wp.Exempt["handler"]; ok {
|
if h, ok := wp.Exempt["handler"]; ok {
|
||||||
wp.Handler = makeWatchHandler(a.LogOutput, h)
|
wp.Handler = makeWatchHandler(a.logger, h)
|
||||||
} else if h, ok := wp.Exempt["args"]; ok {
|
} else if h, ok := wp.Exempt["args"]; ok {
|
||||||
wp.Handler = makeWatchHandler(a.LogOutput, h)
|
wp.Handler = makeWatchHandler(a.logger, h)
|
||||||
} else {
|
} else {
|
||||||
httpConfig := wp.Exempt["http_handler_config"].(*watch.HttpHandlerConfig)
|
httpConfig := wp.Exempt["http_handler_config"].(*watch.HttpHandlerConfig)
|
||||||
wp.Handler = makeHTTPWatchHandler(a.LogOutput, httpConfig)
|
wp.Handler = makeHTTPWatchHandler(a.logger, httpConfig)
|
||||||
}
|
}
|
||||||
wp.LogOutput = a.LogOutput
|
wp.LogOutput = a.LogOutput
|
||||||
|
|
||||||
|
@ -1065,7 +1084,7 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := wp.RunWithConfig(addr, config); err != nil {
|
if err := wp.RunWithConfig(addr, config); err != nil {
|
||||||
a.logger.Printf("[ERR] agent: Failed to run watch: %v", err)
|
a.logger.Error("Failed to run watch", "error", err)
|
||||||
}
|
}
|
||||||
}(wp)
|
}(wp)
|
||||||
}
|
}
|
||||||
|
@ -1296,7 +1315,7 @@ func (a *Agent) consulConfig() (*consul.Config, error) {
|
||||||
// If the tried to specify an ID but typoed it don't ignore as they will
|
// If the tried to specify an ID but typoed it don't ignore as they will
|
||||||
// then bootstrap with a new ID and have to throw away the whole cluster
|
// then bootstrap with a new ID and have to throw away the whole cluster
|
||||||
// and start again.
|
// and start again.
|
||||||
a.logger.Println("[ERR] connect CA config cluster_id specified but " +
|
a.logger.Error("connect CA config cluster_id specified but " +
|
||||||
"is not a valid UUID, aborting startup")
|
"is not a valid UUID, aborting startup")
|
||||||
return nil, fmt.Errorf("cluster_id was supplied but was not a valid UUID")
|
return nil, fmt.Errorf("cluster_id was supplied but was not a valid UUID")
|
||||||
}
|
}
|
||||||
|
@ -1387,7 +1406,7 @@ func (a *Agent) makeRandomID() (string, error) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
a.logger.Printf("[DEBUG] agent: Using random ID %q as node ID", id)
|
a.logger.Debug("Using random ID as node ID", "id", id)
|
||||||
return id, nil
|
return id, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1405,7 +1424,7 @@ func (a *Agent) makeNodeID() (string, error) {
|
||||||
// Try to get a stable ID associated with the host itself.
|
// Try to get a stable ID associated with the host itself.
|
||||||
info, err := host.Info()
|
info, err := host.Info()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[DEBUG] agent: Couldn't get a unique ID from the host: %v", err)
|
a.logger.Debug("Couldn't get a unique ID from the host", "error", err)
|
||||||
return a.makeRandomID()
|
return a.makeRandomID()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1413,8 +1432,10 @@ func (a *Agent) makeNodeID() (string, error) {
|
||||||
// control over this process.
|
// control over this process.
|
||||||
id := strings.ToLower(info.HostID)
|
id := strings.ToLower(info.HostID)
|
||||||
if _, err := uuid.ParseUUID(id); err != nil {
|
if _, err := uuid.ParseUUID(id); err != nil {
|
||||||
a.logger.Printf("[DEBUG] agent: Unique ID %q from host isn't formatted as a UUID: %v",
|
a.logger.Debug("Unique ID from host isn't formatted as a UUID",
|
||||||
id, err)
|
"id", id,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return a.makeRandomID()
|
return a.makeRandomID()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1429,7 +1450,7 @@ func (a *Agent) makeNodeID() (string, error) {
|
||||||
buf[8:10],
|
buf[8:10],
|
||||||
buf[10:16])
|
buf[10:16])
|
||||||
|
|
||||||
a.logger.Printf("[DEBUG] agent: Using unique ID %q from host as node ID", id)
|
a.logger.Debug("Using unique ID from host as node ID", "id", id)
|
||||||
return id, nil
|
return id, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1638,7 +1659,7 @@ func (a *Agent) ShutdownAgent() error {
|
||||||
if a.shutdown {
|
if a.shutdown {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
a.logger.Println("[INFO] agent: Requesting shutdown")
|
a.logger.Info("Requesting shutdown")
|
||||||
|
|
||||||
// Stop the service manager (must happen before we take the stateLock to avoid deadlock)
|
// Stop the service manager (must happen before we take the stateLock to avoid deadlock)
|
||||||
if a.serviceManager != nil {
|
if a.serviceManager != nil {
|
||||||
|
@ -1689,18 +1710,18 @@ func (a *Agent) ShutdownAgent() error {
|
||||||
if a.delegate != nil {
|
if a.delegate != nil {
|
||||||
err = a.delegate.Shutdown()
|
err = a.delegate.Shutdown()
|
||||||
if _, ok := a.delegate.(*consul.Server); ok {
|
if _, ok := a.delegate.(*consul.Server); ok {
|
||||||
a.logger.Print("[INFO] agent: consul server down")
|
a.logger.Info("consul server down")
|
||||||
} else {
|
} else {
|
||||||
a.logger.Print("[INFO] agent: consul client down")
|
a.logger.Info("consul client down")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pidErr := a.deletePid()
|
pidErr := a.deletePid()
|
||||||
if pidErr != nil {
|
if pidErr != nil {
|
||||||
a.logger.Println("[WARN] agent: could not delete pid file ", pidErr)
|
a.logger.Warn("could not delete pid file", "error", pidErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
a.logger.Println("[INFO] agent: shutdown complete")
|
a.logger.Info("shutdown complete")
|
||||||
a.shutdown = true
|
a.shutdown = true
|
||||||
close(a.shutdownCh)
|
close(a.shutdownCh)
|
||||||
return err
|
return err
|
||||||
|
@ -1718,26 +1739,38 @@ func (a *Agent) ShutdownEndpoints() {
|
||||||
|
|
||||||
for _, srv := range a.dnsServers {
|
for _, srv := range a.dnsServers {
|
||||||
if srv.Server != nil {
|
if srv.Server != nil {
|
||||||
a.logger.Printf("[INFO] agent: Stopping DNS server %s (%s)", srv.Server.Addr, srv.Server.Net)
|
a.logger.Info("Stopping server",
|
||||||
|
"protocol", "DNS",
|
||||||
|
"address", srv.Server.Addr,
|
||||||
|
"network", srv.Server.Net,
|
||||||
|
)
|
||||||
srv.Shutdown()
|
srv.Shutdown()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.dnsServers = nil
|
a.dnsServers = nil
|
||||||
|
|
||||||
for _, srv := range a.httpServers {
|
for _, srv := range a.httpServers {
|
||||||
a.logger.Printf("[INFO] agent: Stopping %s server %s (%s)", strings.ToUpper(srv.proto), srv.ln.Addr().String(), srv.ln.Addr().Network())
|
a.logger.Info("Stopping server",
|
||||||
|
"protocol", strings.ToUpper(srv.proto),
|
||||||
|
"address", srv.ln.Addr().String(),
|
||||||
|
"network", srv.ln.Addr().Network(),
|
||||||
|
)
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
srv.Shutdown(ctx)
|
srv.Shutdown(ctx)
|
||||||
if ctx.Err() == context.DeadlineExceeded {
|
if ctx.Err() == context.DeadlineExceeded {
|
||||||
a.logger.Printf("[WARN] agent: Timeout stopping %s server %s (%s)", strings.ToUpper(srv.proto), srv.ln.Addr().String(), srv.ln.Addr().Network())
|
a.logger.Warn("Timeout stopping server",
|
||||||
|
"protocol", strings.ToUpper(srv.proto),
|
||||||
|
"address", srv.ln.Addr().String(),
|
||||||
|
"network", srv.ln.Addr().Network(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.httpServers = nil
|
a.httpServers = nil
|
||||||
|
|
||||||
a.logger.Println("[INFO] agent: Waiting for endpoints to shut down")
|
a.logger.Info("Waiting for endpoints to shut down")
|
||||||
a.wgServers.Wait()
|
a.wgServers.Wait()
|
||||||
a.logger.Print("[INFO] agent: Endpoints down")
|
a.logger.Info("Endpoints down")
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReloadCh is used to return a channel that can be
|
// ReloadCh is used to return a channel that can be
|
||||||
|
@ -1760,46 +1793,55 @@ func (a *Agent) ShutdownCh() <-chan struct{} {
|
||||||
|
|
||||||
// JoinLAN is used to have the agent join a LAN cluster
|
// JoinLAN is used to have the agent join a LAN cluster
|
||||||
func (a *Agent) JoinLAN(addrs []string) (n int, err error) {
|
func (a *Agent) JoinLAN(addrs []string) (n int, err error) {
|
||||||
a.logger.Printf("[INFO] agent: (LAN) joining: %v", addrs)
|
a.logger.Info("(LAN) joining", "lan_addresses", addrs)
|
||||||
n, err = a.delegate.JoinLAN(addrs)
|
n, err = a.delegate.JoinLAN(addrs)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
a.logger.Printf("[INFO] agent: (LAN) joined: %d", n)
|
a.logger.Info("(LAN) joined", "number_of_nodes", n)
|
||||||
if a.joinLANNotifier != nil {
|
if a.joinLANNotifier != nil {
|
||||||
if notifErr := a.joinLANNotifier.Notify(systemd.Ready); notifErr != nil {
|
if notifErr := a.joinLANNotifier.Notify(systemd.Ready); notifErr != nil {
|
||||||
a.logger.Printf("[DEBUG] agent: systemd notify failed: %v", notifErr)
|
a.logger.Debug("systemd notify failed", "error", notifErr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
a.logger.Printf("[WARN] agent: (LAN) couldn't join: %d Err: %v", n, err)
|
a.logger.Warn("(LAN) couldn't join",
|
||||||
|
"number_of_nodes", n,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// JoinWAN is used to have the agent join a WAN cluster
|
// JoinWAN is used to have the agent join a WAN cluster
|
||||||
func (a *Agent) JoinWAN(addrs []string) (n int, err error) {
|
func (a *Agent) JoinWAN(addrs []string) (n int, err error) {
|
||||||
a.logger.Printf("[INFO] agent: (WAN) joining: %v", addrs)
|
a.logger.Info("(WAN) joining", "wan_addresses", addrs)
|
||||||
if srv, ok := a.delegate.(*consul.Server); ok {
|
if srv, ok := a.delegate.(*consul.Server); ok {
|
||||||
n, err = srv.JoinWAN(addrs)
|
n, err = srv.JoinWAN(addrs)
|
||||||
} else {
|
} else {
|
||||||
err = fmt.Errorf("Must be a server to join WAN cluster")
|
err = fmt.Errorf("Must be a server to join WAN cluster")
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
a.logger.Printf("[INFO] agent: (WAN) joined: %d", n)
|
a.logger.Info("(WAN) joined", "number_of_nodes", n)
|
||||||
} else {
|
} else {
|
||||||
a.logger.Printf("[WARN] agent: (WAN) couldn't join: %d Err: %v", n, err)
|
a.logger.Warn("(WAN) couldn't join",
|
||||||
|
"number_of_nodes", n,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForceLeave is used to remove a failed node from the cluster
|
// ForceLeave is used to remove a failed node from the cluster
|
||||||
func (a *Agent) ForceLeave(node string, prune bool) (err error) {
|
func (a *Agent) ForceLeave(node string, prune bool) (err error) {
|
||||||
a.logger.Printf("[INFO] agent: Force leaving node: %v", node)
|
a.logger.Info("Force leaving node", "node", node)
|
||||||
if ok := a.IsMember(node); !ok {
|
if ok := a.IsMember(node); !ok {
|
||||||
return fmt.Errorf("agent: No node found with name '%s'", node)
|
return fmt.Errorf("agent: No node found with name '%s'", node)
|
||||||
}
|
}
|
||||||
err = a.delegate.RemoveFailedNode(node, prune)
|
err = a.delegate.RemoveFailedNode(node, prune)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[WARN] agent: Failed to remove node: %v", err)
|
a.logger.Warn("Failed to remove node",
|
||||||
|
"node", node,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -1838,7 +1880,7 @@ func (a *Agent) IsMember(nodeName string) bool {
|
||||||
// This is called to prevent a race between clients and the anti-entropy routines
|
// This is called to prevent a race between clients and the anti-entropy routines
|
||||||
func (a *Agent) StartSync() {
|
func (a *Agent) StartSync() {
|
||||||
go a.sync.Run()
|
go a.sync.Run()
|
||||||
a.logger.Printf("[INFO] agent: started state syncer")
|
a.logger.Info("started state syncer")
|
||||||
}
|
}
|
||||||
|
|
||||||
// PauseSync is used to pause anti-entropy while bulk changes are made. It also
|
// PauseSync is used to pause anti-entropy while bulk changes are made. It also
|
||||||
|
@ -1911,17 +1953,17 @@ OUTER:
|
||||||
members := a.LANMembers()
|
members := a.LANMembers()
|
||||||
grok, err := consul.CanServersUnderstandProtocol(members, 3)
|
grok, err := consul.CanServersUnderstandProtocol(members, 3)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] agent: Failed to check servers: %s", err)
|
a.logger.Error("Failed to check servers", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !grok {
|
if !grok {
|
||||||
a.logger.Printf("[DEBUG] agent: Skipping coordinate updates until servers are upgraded")
|
a.logger.Debug("Skipping coordinate updates until servers are upgraded")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
cs, err := a.GetLANCoordinate()
|
cs, err := a.GetLANCoordinate()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[ERR] agent: Failed to get coordinate: %s", err)
|
a.logger.Error("Failed to get coordinate", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1940,9 +1982,9 @@ OUTER:
|
||||||
if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
|
if err := a.RPC("Coordinate.Update", &req, &reply); err != nil {
|
||||||
if acl.IsErrPermissionDenied(err) {
|
if acl.IsErrPermissionDenied(err) {
|
||||||
accessorID := a.aclAccessorID(agentToken)
|
accessorID := a.aclAccessorID(agentToken)
|
||||||
a.logger.Printf("[DEBUG] agent: Coordinate update blocked by ACLs, accessorID=%v", accessorID)
|
a.logger.Warn("Coordinate update blocked by ACLs", "accesorID", accessorID)
|
||||||
} else {
|
} else {
|
||||||
a.logger.Printf("[ERR] agent: Coordinate update error: %v", err)
|
a.logger.Error("Coordinate update error", "error", err)
|
||||||
}
|
}
|
||||||
continue OUTER
|
continue OUTER
|
||||||
}
|
}
|
||||||
|
@ -1981,11 +2023,15 @@ func (a *Agent) reapServicesInternal() {
|
||||||
if timeout > 0 && cs.CriticalFor() > timeout {
|
if timeout > 0 && cs.CriticalFor() > timeout {
|
||||||
reaped[serviceID] = true
|
reaped[serviceID] = true
|
||||||
if err := a.RemoveService(serviceID); err != nil {
|
if err := a.RemoveService(serviceID); err != nil {
|
||||||
a.logger.Printf("[ERR] agent: unable to deregister service %q after check %q has been critical for too long: %s",
|
a.logger.Error("unable to deregister service after check has been critical for too long",
|
||||||
serviceID, checkID, err)
|
"service", serviceID.String(),
|
||||||
|
"check", checkID.String(),
|
||||||
|
"error", err)
|
||||||
} else {
|
} else {
|
||||||
a.logger.Printf("[INFO] agent: Check %q for service %q has been critical for too long; deregistered service",
|
a.logger.Info("Check for service has been critical for too long; deregistered service",
|
||||||
checkID, serviceID)
|
"service", serviceID.String(),
|
||||||
|
"check", checkID.String(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2132,7 +2178,7 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se
|
||||||
|
|
||||||
// Skip all partially written temporary files
|
// Skip all partially written temporary files
|
||||||
if strings.HasSuffix(fi.Name(), "tmp") {
|
if strings.HasSuffix(fi.Name(), "tmp") {
|
||||||
a.logger.Printf("[WARN] agent: Ignoring temporary service config file %v", fi.Name())
|
a.logger.Warn("Ignoring temporary service config file", "file", fi.Name())
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2146,7 +2192,10 @@ func (a *Agent) readPersistedServiceConfigs() (map[structs.ServiceID]*structs.Se
|
||||||
// Try decoding the service config definition
|
// Try decoding the service config definition
|
||||||
var p persistedServiceConfig
|
var p persistedServiceConfig
|
||||||
if err := json.Unmarshal(buf, &p); err != nil {
|
if err := json.Unmarshal(buf, &p); err != nil {
|
||||||
a.logger.Printf("[ERR] agent: Failed decoding service config file %q: %s", file, err)
|
a.logger.Error("Failed decoding service config file",
|
||||||
|
"file", file,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
out[structs.NewServiceID(p.ServiceID, &p.EnterpriseMeta)] = p.Defaults
|
out[structs.NewServiceID(p.ServiceID, &p.EnterpriseMeta)] = p.Defaults
|
||||||
|
@ -2394,7 +2443,7 @@ func (a *Agent) addServiceInternal(req *addServiceRequest) error {
|
||||||
if service.Proxy.Expose.Checks {
|
if service.Proxy.Expose.Checks {
|
||||||
err := a.rerouteExposedChecks(psid, service.Proxy.LocalServiceAddress)
|
err := a.rerouteExposedChecks(psid, service.Proxy.LocalServiceAddress)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Println("[WARN] failed to reroute L7 checks to exposed proxy listener")
|
a.logger.Warn("failed to reroute L7 checks to exposed proxy listener")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Reset check targets if proxy was re-registered but no longer wants to expose checks
|
// Reset check targets if proxy was re-registered but no longer wants to expose checks
|
||||||
|
@ -2462,25 +2511,33 @@ func (a *Agent) validateService(service *structs.NodeService, chkTypes []*struct
|
||||||
|
|
||||||
// Warn if the service name is incompatible with DNS
|
// Warn if the service name is incompatible with DNS
|
||||||
if InvalidDnsRe.MatchString(service.Service) {
|
if InvalidDnsRe.MatchString(service.Service) {
|
||||||
a.logger.Printf("[WARN] agent: Service name %q will not be discoverable "+
|
a.logger.Warn("Service name will not be discoverable "+
|
||||||
"via DNS due to invalid characters. Valid characters include "+
|
"via DNS due to invalid characters. Valid characters include "+
|
||||||
"all alpha-numerics and dashes.", service.Service)
|
"all alpha-numerics and dashes.",
|
||||||
|
"service", service.Service,
|
||||||
|
)
|
||||||
} else if len(service.Service) > MaxDNSLabelLength {
|
} else if len(service.Service) > MaxDNSLabelLength {
|
||||||
a.logger.Printf("[WARN] agent: Service name %q will not be discoverable "+
|
a.logger.Warn("Service name will not be discoverable "+
|
||||||
"via DNS due to it being too long. Valid lengths are between "+
|
"via DNS due to it being too long. Valid lengths are between "+
|
||||||
"1 and 63 bytes.", service.Service)
|
"1 and 63 bytes.",
|
||||||
|
"service", service.Service,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Warn if any tags are incompatible with DNS
|
// Warn if any tags are incompatible with DNS
|
||||||
for _, tag := range service.Tags {
|
for _, tag := range service.Tags {
|
||||||
if InvalidDnsRe.MatchString(tag) {
|
if InvalidDnsRe.MatchString(tag) {
|
||||||
a.logger.Printf("[DEBUG] agent: Service tag %q will not be discoverable "+
|
a.logger.Debug("Service tag will not be discoverable "+
|
||||||
"via DNS due to invalid characters. Valid characters include "+
|
"via DNS due to invalid characters. Valid characters include "+
|
||||||
"all alpha-numerics and dashes.", tag)
|
"all alpha-numerics and dashes.",
|
||||||
|
"tag", tag,
|
||||||
|
)
|
||||||
} else if len(tag) > MaxDNSLabelLength {
|
} else if len(tag) > MaxDNSLabelLength {
|
||||||
a.logger.Printf("[DEBUG] agent: Service tag %q will not be discoverable "+
|
a.logger.Debug("Service tag will not be discoverable "+
|
||||||
"via DNS due to it being too long. Valid lengths are between "+
|
"via DNS due to it being too long. Valid lengths are between "+
|
||||||
"1 and 63 bytes.", tag)
|
"1 and 63 bytes.",
|
||||||
|
"tag", tag,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2520,26 +2577,41 @@ func (a *Agent) validateService(service *structs.NodeService, chkTypes []*struct
 func (a *Agent) cleanupRegistration(serviceIDs []structs.ServiceID, checksIDs []structs.CheckID) {
 	for _, s := range serviceIDs {
 		if err := a.State.RemoveService(s); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed to remove service %s: %s", s, err)
+			a.logger.Error("failed to remove service during cleanup",
+				"service", s.String(),
+				"error", err,
+			)
 		}
 		if err := a.purgeService(s); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed to purge service %s file: %s", s, err)
+			a.logger.Error("failed to purge service file during cleanup",
+				"service", s.String(),
+				"error", err,
+			)
 		}
 		if err := a.purgeServiceConfig(s); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed to purge service config %s file: %s", s, err)
+			a.logger.Error("failed to purge service config file during cleanup",
+				"service", s,
+				"error", err,
+			)
 		}
 		if err := a.removeServiceSidecars(s, true); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed remove sidecars for %s: %s", s, err)
+			a.logger.Error("service registration: cleanup: failed remove sidecars for", "service", s, "error", err)
 		}
 	}

 	for _, c := range checksIDs {
 		a.cancelCheckMonitors(c)
 		if err := a.State.RemoveCheck(c); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed to remove check %s: %s", c, err)
+			a.logger.Error("failed to remove check during cleanup",
+				"check", c.String(),
+				"error", err,
+			)
 		}
 		if err := a.purgeCheck(c); err != nil {
-			a.logger.Printf("[ERR] consul: service registration: cleanup: failed to purge check %s file: %s", c, err)
+			a.logger.Error("failed to purge check file during cleanup",
+				"check", c.String(),
+				"error", err,
+			)
 		}
 	}
 }
@@ -2587,7 +2659,10 @@ func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) e

 	// Remove service immediately
 	if err := a.State.RemoveServiceWithChecks(serviceID, checkIDs); err != nil {
-		a.logger.Printf("[WARN] agent: Failed to deregister service %q: %s", serviceID, err)
+		a.logger.Warn("Failed to deregister service",
+			"service", serviceID.String(),
+			"error", err,
+		)
 		return nil
 	}

@@ -2608,7 +2683,7 @@ func (a *Agent) removeServiceLocked(serviceID structs.ServiceID, persist bool) e
 		}
 	}

-	a.logger.Printf("[DEBUG] agent: removed service %q", serviceID.String())
+	a.logger.Debug("removed service", "service", serviceID.String())

 	// If any Sidecar services exist for the removed service ID, remove them too.
 	return a.removeServiceSidecars(serviceID, persist)
@@ -2762,8 +2837,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,

 		// Restore persisted state, if any
 		if err := a.loadCheckState(check); err != nil {
-			a.logger.Printf("[WARN] agent: failed restoring state for check %q: %s",
-				cid, err)
+			a.logger.Warn("failed restoring state for check",
+				"check", cid.String(),
+				"error", err,
+			)
 		}

 		ttl.Start()

@@ -2775,8 +2852,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 			delete(a.checkHTTPs, cid)
 		}
 		if chkType.Interval < checks.MinInterval {
-			a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
-				cid, checks.MinInterval))
+			a.logger.Warn("check has interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", checks.MinInterval,
+			)
 			chkType.Interval = checks.MinInterval
 		}

@@ -2799,7 +2878,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 		if proxy != nil && proxy.Proxy.Expose.Checks {
 			port, err := a.listenerPortLocked(sid, cid)
 			if err != nil {
-				a.logger.Printf("[ERR] agent: error exposing check: %s", err)
+				a.logger.Error("error exposing check",
+					"check", cid.String(),
+					"error", err,
+				)
 				return err
 			}
 			http.ProxyHTTP = httpInjectAddr(http.HTTP, proxy.Proxy.LocalServiceAddress, port)

@@ -2814,8 +2896,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 			delete(a.checkTCPs, cid)
 		}
 		if chkType.Interval < checks.MinInterval {
-			a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
-				cid, checks.MinInterval))
+			a.logger.Warn("check has interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", checks.MinInterval,
+			)
 			chkType.Interval = checks.MinInterval
 		}

@@ -2837,8 +2921,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 			delete(a.checkGRPCs, cid)
 		}
 		if chkType.Interval < checks.MinInterval {
-			a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
-				cid, checks.MinInterval))
+			a.logger.Warn("check has interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", checks.MinInterval,
+			)
 			chkType.Interval = checks.MinInterval
 		}

@@ -2861,7 +2947,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 		if proxy != nil && proxy.Proxy.Expose.Checks {
 			port, err := a.listenerPortLocked(sid, cid)
 			if err != nil {
-				a.logger.Printf("[ERR] agent: error exposing check: %s", err)
+				a.logger.Error("error exposing check",
+					"check", cid.String(),
+					"error", err,
+				)
 				return err
 			}
 			grpc.ProxyGRPC = grpcInjectAddr(grpc.GRPC, proxy.Proxy.LocalServiceAddress, port)

@@ -2876,18 +2965,20 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 			delete(a.checkDockers, cid)
 		}
 		if chkType.Interval < checks.MinInterval {
-			a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
-				cid, checks.MinInterval))
+			a.logger.Warn("check has interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", checks.MinInterval,
+			)
 			chkType.Interval = checks.MinInterval
 		}

 		if a.dockerClient == nil {
 			dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), int64(maxOutputSize))
 			if err != nil {
-				a.logger.Printf("[ERR] agent: error creating docker client: %s", err)
+				a.logger.Error("error creating docker client", "error", err)
 				return err
 			}
-			a.logger.Printf("[DEBUG] agent: created docker client for %s", dc.Host())
+			a.logger.Debug("created docker client", "host", dc.Host())
 			a.dockerClient = dc
 		}

@@ -2914,8 +3005,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 			delete(a.checkMonitors, cid)
 		}
 		if chkType.Interval < checks.MinInterval {
-			a.logger.Printf("[WARN] agent: check '%s' has interval below minimum of %v",
-				cid, checks.MinInterval)
+			a.logger.Warn("check has interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", checks.MinInterval,
+			)
 			chkType.Interval = checks.MinInterval
 		}
 		monitor := &checks.CheckMonitor{

@@ -2982,8 +3075,10 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
 		timeout := chkType.DeregisterCriticalServiceAfter
 		if timeout < a.config.CheckDeregisterIntervalMin {
 			timeout = a.config.CheckDeregisterIntervalMin
-			a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has deregister interval below minimum of %v",
-				cid, a.config.CheckDeregisterIntervalMin))
+			a.logger.Warn("check has deregister interval below minimum",
+				"check", cid.String(),
+				"minimum_interval", a.config.CheckDeregisterIntervalMin,
+			)
 		}
 		a.checkReapAfter[cid] = timeout
 	} else {
@@ -3042,7 +3137,7 @@ func (a *Agent) removeCheckLocked(checkID structs.CheckID, persist bool) error {
 		}
 	}

-	a.logger.Printf("[DEBUG] agent: removed check %q", checkID.String())
+	a.logger.Debug("removed check", "check", checkID.String())
 	return nil
 }

@@ -3221,13 +3316,13 @@ func (a *Agent) loadCheckState(check *structs.HealthCheck) error {
 	// Decode the state data
 	var p persistedCheckState
 	if err := json.Unmarshal(buf, &p); err != nil {
-		a.logger.Printf("[ERR] agent: failed decoding check state: %s", err)
+		a.logger.Error("failed decoding check state", "error", err)
 		return a.purgeCheckState(cid)
 	}

 	// Check if the state has expired
 	if time.Now().Unix() >= p.Expires {
-		a.logger.Printf("[DEBUG] agent: check state expired for %q, not restoring", cid.String())
+		a.logger.Debug("check state expired, not restoring", "check", cid.String())
 		return a.purgeCheckState(cid)
 	}

@@ -3403,7 +3498,7 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error {

 		// Skip all partially written temporary files
 		if strings.HasSuffix(fi.Name(), "tmp") {
-			a.logger.Printf("[WARN] agent: Ignoring temporary service file %v", fi.Name())
+			a.logger.Warn("Ignoring temporary service file", "file", fi.Name())
 			continue
 		}

@@ -3419,7 +3514,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error {
 		if err := json.Unmarshal(buf, &p); err != nil {
 			// Backwards-compatibility for pre-0.5.1 persisted services
 			if err := json.Unmarshal(buf, &p.Service); err != nil {
-				a.logger.Printf("[ERR] agent: Failed decoding service file %q: %s", file, err)
+				a.logger.Error("Failed decoding service file",
+					"file", file,
+					"error", err,
+				)
 				continue
 			}
 		}

@@ -3427,7 +3525,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error {

 		source, ok := ConfigSourceFromName(p.Source)
 		if !ok {
-			a.logger.Printf("[WARN] agent: service %q exists with invalid source %q, purging", serviceID, p.Source)
+			a.logger.Warn("service exists with invalid source, purging",
+				"service", serviceID.String(),
+				"source", p.Source,
+			)
 			if err := a.purgeService(serviceID); err != nil {
 				return fmt.Errorf("failed purging service %q: %s", serviceID, err)
 			}

@@ -3440,8 +3541,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error {
 		if a.State.Service(serviceID) != nil {
 			// Purge previously persisted service. This allows config to be
 			// preferred over services persisted from the API.
-			a.logger.Printf("[DEBUG] agent: service %q exists, not restoring from %q",
-				serviceID.String(), file)
+			a.logger.Debug("service exists, not restoring from file",
+				"service", serviceID.String(),
+				"file", file,
+			)
 			if err := a.purgeService(serviceID); err != nil {
 				return fmt.Errorf("failed purging service %q: %s", serviceID.String(), err)
 			}

@@ -3449,8 +3552,10 @@ func (a *Agent) loadServices(conf *config.RuntimeConfig) error {
 				return fmt.Errorf("failed purging service config %q: %s", serviceID.String(), err)
 			}
 		} else {
-			a.logger.Printf("[DEBUG] agent: restored service definition %q from %q",
-				serviceID.String(), file)
+			a.logger.Debug("restored service definition from file",
+				"service", serviceID.String(),
+				"file", file,
+			)
 			err = a.addServiceLocked(&addServiceRequest{
 				service:  p.Service,
 				chkTypes: nil,
@@ -3535,14 +3640,20 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
 		// Decode the check
 		var p persistedCheck
 		if err := json.Unmarshal(buf, &p); err != nil {
-			a.logger.Printf("[ERR] agent: Failed decoding check file %q: %s", file, err)
+			a.logger.Error("Failed decoding check file",
+				"file", file,
+				"error", err,
+			)
 			continue
 		}
 		checkID := p.Check.CompoundCheckID()

 		source, ok := ConfigSourceFromName(p.Source)
 		if !ok {
-			a.logger.Printf("[WARN] agent: check %q exists with invalid source %q, purging", checkID, p.Source)
+			a.logger.Warn("check exists with invalid source, purging",
+				"check", checkID.String(),
+				"source", p.Source,
+			)
 			if err := a.purgeCheck(checkID); err != nil {
 				return fmt.Errorf("failed purging check %q: %s", checkID, err)
 			}

@@ -3552,8 +3663,10 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]
 		if a.State.Check(checkID) != nil {
 			// Purge previously persisted check. This allows config to be
 			// preferred over persisted checks from the API.
-			a.logger.Printf("[DEBUG] agent: check %q exists, not restoring from %q",
-				checkID.String(), file)
+			a.logger.Debug("check exists, not restoring from file",
+				"check", checkID.String(),
+				"file", file,
+			)
 			if err := a.purgeCheck(checkID); err != nil {
 				return fmt.Errorf("Failed purging check %q: %s", checkID, err)
 			}

@@ -3570,14 +3683,18 @@ func (a *Agent) loadChecks(conf *config.RuntimeConfig, snap map[structs.CheckID]

 			if err := a.addCheckLocked(p.Check, p.ChkType, false, p.Token, source); err != nil {
 				// Purge the check if it is unable to be restored.
-				a.logger.Printf("[WARN] agent: Failed to restore check %q: %s",
-					checkID, err)
+				a.logger.Warn("Failed to restore check",
+					"check", checkID.String(),
+					"error", err,
+				)
 				if err := a.purgeCheck(checkID); err != nil {
 					return fmt.Errorf("Failed purging check %q: %s", checkID, err)
 				}
 			}
-			a.logger.Printf("[DEBUG] agent: restored health check %q from %q",
-				p.Check.CheckID, file)
+			a.logger.Debug("restored health check from file",
+				"check", p.Check.CheckID,
+				"file", file,
+			)
 		}
 	}

@@ -3632,14 +3749,14 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
 	persistedTokens, persistenceErr := a.getPersistedTokens()

 	if persistenceErr != nil {
-		a.logger.Printf("[WARN] unable to load persisted tokens: %v", persistenceErr)
+		a.logger.Warn("unable to load persisted tokens", "error", persistenceErr)
 	}

 	if persistedTokens.Default != "" {
 		a.tokens.UpdateUserToken(persistedTokens.Default, token.TokenSourceAPI)

 		if conf.ACLToken != "" {
-			a.logger.Printf("[WARN] \"default\" token present in both the configuration and persisted token store, using the persisted token")
+			a.logger.Warn("\"default\" token present in both the configuration and persisted token store, using the persisted token")
 		}
 	} else {
 		a.tokens.UpdateUserToken(conf.ACLToken, token.TokenSourceConfig)

@@ -3649,7 +3766,7 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
 		a.tokens.UpdateAgentToken(persistedTokens.Agent, token.TokenSourceAPI)

 		if conf.ACLAgentToken != "" {
-			a.logger.Printf("[WARN] \"agent\" token present in both the configuration and persisted token store, using the persisted token")
+			a.logger.Warn("\"agent\" token present in both the configuration and persisted token store, using the persisted token")
 		}
 	} else {
 		a.tokens.UpdateAgentToken(conf.ACLAgentToken, token.TokenSourceConfig)

@@ -3659,7 +3776,7 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
 		a.tokens.UpdateAgentMasterToken(persistedTokens.AgentMaster, token.TokenSourceAPI)

 		if conf.ACLAgentMasterToken != "" {
-			a.logger.Printf("[WARN] \"agent_master\" token present in both the configuration and persisted token store, using the persisted token")
+			a.logger.Warn("\"agent_master\" token present in both the configuration and persisted token store, using the persisted token")
 		}
 	} else {
 		a.tokens.UpdateAgentMasterToken(conf.ACLAgentMasterToken, token.TokenSourceConfig)

@@ -3669,7 +3786,7 @@ func (a *Agent) loadTokens(conf *config.RuntimeConfig) error {
 		a.tokens.UpdateReplicationToken(persistedTokens.Replication, token.TokenSourceAPI)

 		if conf.ACLReplicationToken != "" {
-			a.logger.Printf("[WARN] \"replication\" token present in both the configuration and persisted token store, using the persisted token")
+			a.logger.Warn("\"replication\" token present in both the configuration and persisted token store, using the persisted token")
 		}
 	} else {
 		a.tokens.UpdateReplicationToken(conf.ACLReplicationToken, token.TokenSourceConfig)

@@ -3740,7 +3857,7 @@ func (a *Agent) EnableServiceMaintenance(serviceID structs.ServiceID, reason, to
 		EnterpriseMeta: checkID.EnterpriseMeta,
 	}
 	a.AddCheck(check, nil, true, token, ConfigSourceLocal)
-	a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID.String())
+	a.logger.Info("Service entered maintenance mode", "service", serviceID.String())

 	return nil
 }

@@ -3761,7 +3878,7 @@ func (a *Agent) DisableServiceMaintenance(serviceID structs.ServiceID) error {

 	// Deregister the maintenance check
 	a.RemoveCheck(checkID, true)
-	a.logger.Printf("[INFO] agent: Service %q left maintenance mode", serviceID.String())
+	a.logger.Info("Service left maintenance mode", "service", serviceID.String())

 	return nil
 }

@@ -3788,7 +3905,7 @@ func (a *Agent) EnableNodeMaintenance(reason, token string) {
 		Type: "maintenance",
 	}
 	a.AddCheck(check, nil, true, token, ConfigSourceLocal)
-	a.logger.Printf("[INFO] agent: Node entered maintenance mode")
+	a.logger.Info("Node entered maintenance mode")
 }

 // DisableNodeMaintenance removes a node from maintenance mode

@@ -3797,7 +3914,7 @@ func (a *Agent) DisableNodeMaintenance() {
 		return
 	}
 	a.RemoveCheck(structs.NodeMaintCheckID, true)
-	a.logger.Printf("[INFO] agent: Node left maintenance mode")
+	a.logger.Info("Node left maintenance mode")
 }

 func (a *Agent) loadLimits(conf *config.RuntimeConfig) {
@@ -3,12 +3,12 @@ package agent
 import (
 	"encoding/json"
 	"fmt"
-	"log"
 	"net/http"
 	"path/filepath"
 	"strconv"
 	"strings"

+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/mitchellh/hashstructure"

@@ -21,10 +21,10 @@ import (
 	"github.com/hashicorp/consul/ipaddr"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/lib/file"
-	"github.com/hashicorp/consul/logger"
+	"github.com/hashicorp/consul/logging"
+	"github.com/hashicorp/consul/logging/monitor"
 	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/go-bexpr"
-	"github.com/hashicorp/logutils"
 	"github.com/hashicorp/serf/coordinate"
 	"github.com/hashicorp/serf/serf"
 	"github.com/prometheus/client_golang/prometheus"

@@ -111,7 +111,9 @@ func (s *HTTPServer) AgentMetrics(resp http.ResponseWriter, req *http.Request) (
 		return nil, nil
 	}
 	handlerOptions := promhttp.HandlerOpts{
-		ErrorLog:      s.agent.logger,
+		ErrorLog: s.agent.logger.StandardLogger(&hclog.StandardLoggerOptions{
+			InferLevels: true,
+		}),
 		ErrorHandling: promhttp.ContinueOnError,
 	}

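For libraries such as promhttp that still expect a stdlib *log.Logger, hclog can hand out an adapter that parses leading "[LEVEL]" tags and routes lines to the matching level. A small sketch of that bridge, with illustrative names rather than Consul's actual wiring:

package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent", Level: hclog.Info})

	// std is a *log.Logger that forwards into the hclog logger.
	// InferLevels makes "[WARN] ..." style prefixes map to hclog levels.
	var std *log.Logger = logger.StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels: true,
	})

	std.Println("[WARN] something a legacy API wants to report")
}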
@@ -491,7 +493,7 @@ func (s *HTTPServer) AgentForceLeave(resp http.ResponseWriter, req *http.Request
 // only warn because the write did succeed and anti-entropy will sync later.
 func (s *HTTPServer) syncChanges() {
 	if err := s.agent.State.SyncChanges(); err != nil {
-		s.agent.logger.Printf("[ERR] agent: failed to sync changes: %v", err)
+		s.agent.logger.Error("failed to sync changes", "error", err)
 	}
 }

@@ -1110,31 +1112,31 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (
 		logLevel = "INFO"
 	}

-	// Upper case the level since that's required by the filter.
-	logLevel = strings.ToUpper(logLevel)
-
-	// Create a level filter and flusher.
-	filter := logger.LevelFilter()
-	filter.MinLevel = logutils.LogLevel(logLevel)
-	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
-		resp.WriteHeader(http.StatusBadRequest)
-		fmt.Fprintf(resp, "Unknown log level: %s", filter.MinLevel)
-		return nil, nil
+	var logJSON bool
+	if _, ok := req.URL.Query()["logjson"]; ok {
+		logJSON = true
 	}

+	if !logging.ValidateLogLevel(logLevel) {
+		return nil, BadRequestError{
+			Reason: fmt.Sprintf("Unknown log level: %s", logLevel),
+		}
+	}
+
 	flusher, ok := resp.(http.Flusher)
 	if !ok {
 		return nil, fmt.Errorf("Streaming not supported")
 	}

-	// Set up a log handler.
-	handler := &httpLogHandler{
-		filter: filter,
-		logCh:  make(chan string, 512),
-		logger: s.agent.logger,
-	}
-	s.agent.LogWriter.RegisterHandler(handler)
-	defer s.agent.LogWriter.DeregisterHandler(handler)
-	notify := resp.(http.CloseNotifier).CloseNotify()
+	monitor := monitor.New(monitor.Config{
+		BufferSize: 512,
+		Logger:     s.agent.logger,
+		LoggerOptions: &hclog.LoggerOptions{
+			Level:      logging.LevelFromString(logLevel),
+			JSONFormat: logJSON,
+		},
+	})
+	logsCh := monitor.Start()

 	// Send header so client can start streaming body
 	resp.WriteHeader(http.StatusOK)
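The level-validation step above replaces the old logutils filter with helpers in the new logging package (ValidateLogLevel, LevelFromString). An equivalent check can be sketched with only the public hclog helper; this is an illustration under the assumption that comparable behavior is wanted, not Consul's actual helper:

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/go-hclog"
)

// validLevel mirrors the kind of check the endpoint performs:
// hclog.LevelFromString returns hclog.NoLevel for unrecognized names.
func validLevel(s string) bool {
	return hclog.LevelFromString(strings.ToLower(s)) != hclog.NoLevel
}

func main() {
	fmt.Println(validLevel("DEBUG"))   // true
	fmt.Println(validLevel("invalid")) // false
}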
@@ -1147,42 +1149,19 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (
 	// Stream logs until the connection is closed.
 	for {
 		select {
-		case <-notify:
-			s.agent.LogWriter.DeregisterHandler(handler)
-			if handler.droppedCount > 0 {
-				s.agent.logger.Printf("[WARN] agent: Dropped %d logs during monitor request", handler.droppedCount)
+		case <-req.Context().Done():
+			droppedCount := monitor.Stop()
+			if droppedCount > 0 {
+				s.agent.logger.Warn("Dropped logs during monitor request", "dropped_count", droppedCount)
 			}
 			return nil, nil
-		case log := <-handler.logCh:
-			fmt.Fprintln(resp, log)
+		case log := <-logsCh:
+			fmt.Fprint(resp, string(log))
 			flusher.Flush()
 		}
 	}
 }

-type httpLogHandler struct {
-	filter       *logutils.LevelFilter
-	logCh        chan string
-	logger       *log.Logger
-	droppedCount int
-}
-
-func (h *httpLogHandler) HandleLog(log string) {
-	// Check the log level
-	if !h.filter.Check([]byte(log)) {
-		return
-	}
-
-	// Do a non-blocking send
-	select {
-	case h.logCh <- log:
-	default:
-		// Just increment a counter for dropped logs to this handler; we can't log now
-		// because the lock is already held by the LogWriter invoking this
-		h.droppedCount++
-	}
-}
-
 func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
 	if s.checkACLDisabled(resp, req) {
 		return nil, nil
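With the hunks above, the monitor endpoint can stream either format to a client. A hedged sketch of consuming it from Go; the agent address is an assumption and the query parameters (loglevel, logjson) are the ones the diff adds:

package main

import (
	"bufio"
	"fmt"
	"net/http"
)

func main() {
	// Assumes a local agent on the default HTTP port; adjust as needed.
	url := "http://127.0.0.1:8500/v1/agent/monitor?loglevel=debug&logjson"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each line of the streamed body is one rendered log entry
	// (a JSON object here because of the logjson flag).
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}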
@@ -1273,17 +1252,17 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in

 		data, err := json.Marshal(tokens)
 		if err != nil {
-			s.agent.logger.Printf("[WARN] agent: failed to persist tokens - %v", err)
+			s.agent.logger.Warn("failed to persist tokens", "error", err)
 			return nil, fmt.Errorf("Failed to marshal tokens for persistence: %v", err)
 		}

 		if err := file.WriteAtomicWithPerms(filepath.Join(s.agent.config.DataDir, tokensPath), data, 0600); err != nil {
-			s.agent.logger.Printf("[WARN] agent: failed to persist tokens - %v", err)
+			s.agent.logger.Warn("failed to persist tokens", "error", err)
 			return nil, fmt.Errorf("Failed to persist tokens - %v", err)
 		}
 	}

-	s.agent.logger.Printf("[INFO] agent: Updated agent's ACL token %q", target)
+	s.agent.logger.Info("Updated agent's ACL token", "token", target)
 	return nil, nil
 }

@@ -2,16 +2,15 @@ package agent

 import (
 	"bytes"
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
-	"os"
 	"reflect"
 	"strconv"
 	"strings"

@@ -28,7 +27,6 @@ import (
 	tokenStore "github.com/hashicorp/consul/agent/token"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
-	"github.com/hashicorp/consul/logger"
 	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"

@@ -1248,7 +1246,7 @@ func TestAgent_Reload(t *testing.T) {
 		t.Fatal("missing redis service")
 	}

-	cfg2 := TestConfig(testutil.TestLogger(t), config.Source{
+	cfg2 := TestConfig(testutil.Logger(t), config.Source{
 		Name:   "reload",
 		Format: "hcl",
 		Data: `
@@ -4137,69 +4135,153 @@ func TestAgent_RegisterCheck_Service(t *testing.T) {

 func TestAgent_Monitor(t *testing.T) {
 	t.Parallel()
-	logWriter := logger.NewLogWriter(512)
-	a := NewTestAgentWithFields(t, true, TestAgent{
-		LogWriter: logWriter,
-		LogOutput: io.MultiWriter(os.Stderr, logWriter),
-		HCL:       `node_name = "invalid!"`,
-	})
+	a := NewTestAgent(t, t.Name(), "")
 	defer a.Shutdown()
 	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

-	// Try passing an invalid log level
-	req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil)
-	resp := newClosableRecorder()
-	if _, err := a.srv.AgentMonitor(resp, req); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if resp.Code != 400 {
-		t.Fatalf("bad: %v", resp.Code)
-	}
-	body, _ := ioutil.ReadAll(resp.Body)
-	if !strings.Contains(string(body), "Unknown log level") {
-		t.Fatalf("bad: %s", body)
-	}
+	t.Run("unknown log level", func(t *testing.T) {
+		// Try passing an invalid log level
+		req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil)
+		resp := httptest.NewRecorder()
+		_, err := a.srv.AgentMonitor(resp, req)
+		if err == nil {
+			t.Fatal("expected BadRequestError to have occurred, got nil")
+		}

-	// Try to stream logs until we see the expected log line
-	retry.Run(t, func(r *retry.R) {
-		req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
-		resp = newClosableRecorder()
-		done := make(chan struct{})
-		go func() {
-			if _, err := a.srv.AgentMonitor(resp, req); err != nil {
-				t.Fatalf("err: %s", err)
-			}
-			close(done)
-		}()
+		// Note that BadRequestError is handled outside the endpoint handler so we
+		// still see a 200 if we check here.
+		if _, ok := err.(BadRequestError); !ok {
+			t.Fatalf("expected BadRequestError to have occurred, got %#v", err)
+		}

-		resp.Close()
-		<-done
-
-		got := resp.Body.Bytes()
-		want := []byte(`[WARN] agent: Node name "invalid!" will not be discoverable via DNS`)
-		if !bytes.Contains(got, want) {
-			r.Fatalf("got %q and did not find %q", got, want)
+		substring := "Unknown log level"
+		if !strings.Contains(err.Error(), substring) {
+			t.Fatalf("got: %s, wanted message containing: %s", err.Error(), substring)
 		}
 	})
-}

-type closableRecorder struct {
-	*httptest.ResponseRecorder
-	closer chan bool
-}
-
-func newClosableRecorder() *closableRecorder {
-	r := httptest.NewRecorder()
-	closer := make(chan bool)
-	return &closableRecorder{r, closer}
-}
-
-func (r *closableRecorder) Close() {
-	close(r.closer)
-}
-
-func (r *closableRecorder) CloseNotify() <-chan bool {
-	return r.closer
+	t.Run("stream unstructured logs", func(t *testing.T) {
+		// Try to stream logs until we see the expected log line
+		retry.Run(t, func(r *retry.R) {
+			req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
+			cancelCtx, cancelFunc := context.WithCancel(context.Background())
+			req = req.WithContext(cancelCtx)
+
+			resp := httptest.NewRecorder()
+			errCh := make(chan error)
+			go func() {
+				_, err := a.srv.AgentMonitor(resp, req)
+				errCh <- err
+			}()
+
+			args := &structs.ServiceDefinition{
+				Name: "monitor",
+				Port: 8000,
+				Check: structs.CheckType{
+					TTL: 15 * time.Second,
+				},
+			}
+
+			registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
+			if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil {
+				t.Fatalf("err: %v", err)
+			}
+
+			// Wait until we have received some type of logging output
+			require.Eventually(t, func() bool {
+				return len(resp.Body.Bytes()) > 0
+			}, 3*time.Second, 100*time.Millisecond)
+
+			cancelFunc()
+			err := <-errCh
+			require.NoError(t, err)
+
+			got := resp.Body.String()
+
+			// Only check a substring that we are highly confident in finding
+			want := "Synced service: service="
+			if !strings.Contains(got, want) {
+				r.Fatalf("got %q and did not find %q", got, want)
+			}
+		})
+	})
+
+	t.Run("stream JSON logs", func(t *testing.T) {
+		// Try to stream logs until we see the expected log line
+		retry.Run(t, func(r *retry.R) {
+			req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug&logjson", nil)
+			cancelCtx, cancelFunc := context.WithCancel(context.Background())
+			req = req.WithContext(cancelCtx)
+
+			resp := httptest.NewRecorder()
+			errCh := make(chan error)
+			go func() {
+				_, err := a.srv.AgentMonitor(resp, req)
+				errCh <- err
+			}()
+
+			args := &structs.ServiceDefinition{
+				Name: "monitor",
+				Port: 8000,
+				Check: structs.CheckType{
+					TTL: 15 * time.Second,
+				},
+			}
+
+			registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
+			if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil {
+				t.Fatalf("err: %v", err)
+			}
+
+			// Wait until we have received some type of logging output
+			require.Eventually(t, func() bool {
+				return len(resp.Body.Bytes()) > 0
+			}, 3*time.Second, 100*time.Millisecond)
+
+			cancelFunc()
+			err := <-errCh
+			require.NoError(t, err)
+
+			// Each line is output as a separate JSON object, we grab the first and
+			// make sure it can be unmarshalled.
+			firstLine := bytes.Split(resp.Body.Bytes(), []byte("\n"))[0]
+			var output map[string]interface{}
+			if err := json.Unmarshal(firstLine, &output); err != nil {
+				t.Fatalf("err: %v", err)
+			}
+		})
+	})
+
+	// hopefully catch any potential regression in serf/memberlist logging setup.
+	t.Run("serf shutdown logging", func(t *testing.T) {
+		req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
+		cancelCtx, cancelFunc := context.WithCancel(context.Background())
+		req = req.WithContext(cancelCtx)
+
+		resp := httptest.NewRecorder()
+		errCh := make(chan error)
+		go func() {
+			_, err := a.srv.AgentMonitor(resp, req)
+			errCh <- err
+		}()
+
+		require.NoError(t, a.Shutdown())
+
+		// Wait until we have received some type of logging output
+		require.Eventually(t, func() bool {
+			return len(resp.Body.Bytes()) > 0
+		}, 3*time.Second, 100*time.Millisecond)
+
+		cancelFunc()
+		err := <-errCh
+		require.NoError(t, err)
+
+		got := resp.Body.String()
+		want := "serf: Shutdown without a Leave"
+		if !strings.Contains(got, want) {
+			t.Fatalf("got %q and did not find %q", got, want)
+		}
+	})
 }

 func TestAgent_Monitor_ACLDeny(t *testing.T) {
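The JSON subtest above only asserts that the first streamed line parses as JSON. For orientation, hclog's JSON encoder typically emits fields such as "@level", "@message", "@module" and "@timestamp", with any key/value pairs appearing as additional top-level keys; the exact field set is an assumption here, and the sample line below is fabricated for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A representative hclog-style JSON line (values are illustrative only).
	line := []byte(`{"@level":"debug","@message":"Synced service","@module":"agent","@timestamp":"2020-01-28T10:00:00.000000Z","service":"monitor"}`)

	var entry map[string]interface{}
	if err := json.Unmarshal(line, &entry); err != nil {
		panic(err)
	}
	fmt.Println(entry["@level"], entry["@message"], entry["service"])
}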
@@ -1110,7 +1110,7 @@ func verifyIndexChurn(t *testing.T, tags []string) {
 		t.Fatalf("err: %v", err)
 	}
 	for _, name := range before.Nodes[0].Checks {
-		a.logger.Println("[DEBUG] Checks Registered: ", name.Name)
+		a.logger.Debug("Registered node", "node", name.Name)
 	}
 	if got, want := len(before.Nodes), 1; got != want {
 		t.Fatalf("got %d want %d", got, want)

@@ -1120,7 +1120,7 @@ func verifyIndexChurn(t *testing.T, tags []string) {
 	}

 	for i := 0; i < 10; i++ {
-		a.logger.Println("[INFO] # ", i+1, "Sync in progress ")
+		a.logger.Info("Sync in progress", "iteration", i+1)
 		if err := a.sync.State.SyncFull(); err != nil {
 			t.Fatalf("err: %v", err)
 		}

@@ -1416,7 +1416,7 @@ func TestAgent_RestoreServiceWithAliasCheck(t *testing.T) {
 	// We do this so that the agent logs and the informational messages from
 	// the test itself are interwoven properly.
 	logf := func(t *testing.T, a *TestAgent, format string, args ...interface{}) {
-		a.logger.Printf("[INFO] testharness: "+format, args...)
+		a.logger.Info("testharness: " + fmt.Sprintf(format, args...))
 	}

 	dataDir := testutil.TempDir(t, "agent") // we manage the data dir

@@ -3429,7 +3429,7 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
 		key_file = "../test/key/ourdomain.key"
 		verify_server_hostname = true
 	`
-	c := TestConfig(testutil.TestLogger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
+	c := TestConfig(testutil.Logger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
 	require.NoError(t, a.ReloadConfig(c))
 	tlsConf = a.tlsConfigurator.OutgoingRPCConfig()
 	require.False(t, tlsConf.InsecureSkipVerify)

@@ -3468,7 +3468,7 @@ func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) {
 		key_file = "../test/key/ourdomain.key"
 		verify_server_hostname = true
 	`
-	c := TestConfig(testutil.TestLogger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
+	c := TestConfig(testutil.Logger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
 	require.NoError(t, a.ReloadConfig(c))
 	tlsConf, err = tlsConf.GetConfigForClient(nil)
 	require.NoError(t, err)

@@ -3497,7 +3497,7 @@ func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) {
 		data_dir = "` + dataDir + `"
 		verify_incoming = true
 	`
-	c := TestConfig(testutil.TestLogger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
+	c := TestConfig(testutil.Logger(t), config.Source{Name: t.Name(), Format: "hcl", Data: hcl})
 	require.Error(t, a.ReloadConfig(c))
 	tlsConf, err := tlsConf.GetConfigForClient(nil)
 	require.NoError(t, err)
@@ -4,8 +4,6 @@ import (
 	"crypto/tls"
 	"fmt"
 	"io"
-	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"os"

@@ -15,6 +13,8 @@ import (
 	"time"

 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/sdk/testutil"
+	"github.com/hashicorp/go-hclog"

 	"github.com/armon/circbuf"
 	"github.com/hashicorp/consul/agent/exec"

@@ -65,7 +65,7 @@ type CheckMonitor struct {
 	ScriptArgs    []string
 	Interval      time.Duration
 	Timeout       time.Duration
-	Logger        *log.Logger
+	Logger        hclog.Logger
 	OutputMaxSize int
 	StatusHandler *StatusHandler

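Callers now inject an hclog.Logger instead of a *log.Logger. One convenient way to do that, sketched below with an assumed parent logger and a stand-in struct rather than the real CheckMonitor, is to hand each subsystem a named sub-logger so its output is prefixed consistently:

package main

import (
	"time"

	"github.com/hashicorp/go-hclog"
)

// checkRunner stands in for a component like CheckMonitor that now accepts
// an hclog.Logger; the field names and method are illustrative, not Consul's.
type checkRunner struct {
	Interval time.Duration
	Logger   hclog.Logger
}

func (c *checkRunner) run() {
	c.Logger.Debug("Check status updated", "check", "mem", "status", "passing")
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "agent", Level: hclog.Debug})

	// Named() returns a child logger; its output is tagged "agent.checks".
	r := &checkRunner{Interval: 10 * time.Second, Logger: root.Named("checks")}
	r.run()
}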
@@ -121,7 +121,10 @@ func (c *CheckMonitor) check() {
 		cmd, err = exec.Script(c.Script)
 	}
 	if err != nil {
-		c.Logger.Printf("[ERR] agent: Check %q failed to setup: %s", c.CheckID.String(), err)
+		c.Logger.Error("Check failed to setup",
+			"check", c.CheckID.String(),
+			"error", err,
+		)
 		c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 		return
 	}

@@ -138,13 +141,19 @@ func (c *CheckMonitor) check() {
 			outputStr = fmt.Sprintf("Captured %d of %d bytes\n...\n%s",
 				output.Size(), output.TotalWritten(), outputStr)
 		}
-		c.Logger.Printf("[TRACE] agent: Check %q output: %s", c.CheckID.String(), outputStr)
+		c.Logger.Trace("Check output",
+			"check", c.CheckID.String(),
+			"output", outputStr,
+		)
 		return outputStr
 	}

 	// Start the check
 	if err := cmd.Start(); err != nil {
-		c.Logger.Printf("[ERR] agent: Check %q failed to invoke: %s", c.CheckID.String(), err)
+		c.Logger.Error("Check failed to invoke",
+			"check", c.CheckID.String(),
+			"error", err,
+		)
 		c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
 		return
 	}

@@ -162,11 +171,17 @@ func (c *CheckMonitor) check() {
 	select {
 	case <-time.After(timeout):
 		if err := exec.KillCommandSubtree(cmd); err != nil {
-			c.Logger.Printf("[WARN] agent: Check %q failed to kill after timeout: %s", c.CheckID.String(), err)
+			c.Logger.Warn("Check failed to kill after timeout",
+				"check", c.CheckID.String(),
+				"error", err,
+			)
 		}

 		msg := fmt.Sprintf("Timed out (%s) running check", timeout.String())
-		c.Logger.Printf("[WARN] agent: Check %q: %s", c.CheckID.String(), msg)
+		c.Logger.Warn("Timed out running check",
+			"check", c.CheckID.String(),
+			"timeout", timeout.String(),
+		)

 		outputStr := truncateAndLogOutput()
 		if len(outputStr) > 0 {
@@ -215,7 +230,7 @@ type CheckTTL struct {
 	CheckID   structs.CheckID
 	ServiceID structs.ServiceID
 	TTL       time.Duration
-	Logger    *log.Logger
+	Logger    hclog.Logger

 	timer *time.Timer

@@ -258,8 +273,9 @@ func (c *CheckTTL) run() {
 	for {
 		select {
 		case <-c.timer.C:
-			c.Logger.Printf("[WARN] agent: Check %q missed TTL, is now critical",
-				c.CheckID.String())
+			c.Logger.Warn("Check missed TTL, is now critical",
+				"check", c.CheckID.String(),
+			)
 			c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, c.getExpiredOutput())

 		case <-c.stopCh:

@@ -285,7 +301,10 @@ func (c *CheckTTL) getExpiredOutput() string {
 // and to renew the TTL. If expired, TTL is restarted.
 // output is returned (might be truncated)
 func (c *CheckTTL) SetStatus(status, output string) string {
-	c.Logger.Printf("[DEBUG] agent: Check %q status is now %s", c.CheckID.String(), status)
+	c.Logger.Debug("Check status updated",
+		"check", c.CheckID.String(),
+		"status", status,
+	)
 	total := len(output)
 	if total > c.OutputMaxSize {
 		output = fmt.Sprintf("%s ... (captured %d of %d bytes)",
@@ -316,7 +335,7 @@ type CheckHTTP struct {
 	Method          string
 	Interval        time.Duration
 	Timeout         time.Duration
-	Logger          *log.Logger
+	Logger          hclog.Logger
 	TLSClientConfig *tls.Config
 	OutputMaxSize   int
 	StatusHandler   *StatusHandler

@@ -450,7 +469,10 @@ func (c *CheckHTTP) check() {
 	// Read the response into a circular buffer to limit the size
 	output, _ := circbuf.NewBuffer(int64(c.OutputMaxSize))
 	if _, err := io.Copy(output, resp.Body); err != nil {
-		c.Logger.Printf("[WARN] agent: Check %q error while reading body: %s", c.CheckID.String(), err)
+		c.Logger.Warn("Check error while reading body",
+			"check", c.CheckID.String(),
+			"error", err,
+		)
 	}

 	// Format the response body

@@ -481,7 +503,7 @@ type CheckTCP struct {
 	TCP           string
 	Interval      time.Duration
 	Timeout       time.Duration
-	Logger        *log.Logger
+	Logger        hclog.Logger
 	StatusHandler *StatusHandler

 	dialer *net.Dialer

@@ -542,7 +564,10 @@ func (c *CheckTCP) run() {
 func (c *CheckTCP) check() {
 	conn, err := c.dialer.Dial(`tcp`, c.TCP)
 	if err != nil {
-		c.Logger.Printf("[WARN] agent: Check %q socket connection failed: %s", c.CheckID.String(), err)
+		c.Logger.Warn("Check socket connection failed",
+			"check", c.CheckID.String(),
+			"error", err,
+		)
 		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
 		return
 	}

@@ -563,7 +588,7 @@ type CheckDocker struct {
 	DockerContainerID string
 	Shell             string
 	Interval          time.Duration
-	Logger            *log.Logger
+	Logger            hclog.Logger
 	Client            *DockerClient
 	StatusHandler     *StatusHandler

@@ -576,7 +601,7 @@ func (c *CheckDocker) Start() {
 	}

 	if c.Logger == nil {
-		c.Logger = log.New(ioutil.Discard, "", 0)
+		c.Logger = testutil.NewDiscardLogger()
 	}

 	if c.Shell == "" {
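When no logger is injected, the code now falls back to a discard logger from the SDK's testutil package. Outside of Consul the same idea can be expressed with hclog's built-in null logger; a small sketch, with an illustrative struct rather than the real CheckDocker:

package main

import "github.com/hashicorp/go-hclog"

type dockerCheck struct {
	Logger hclog.Logger
}

func (c *dockerCheck) start() {
	if c.Logger == nil {
		// NewNullLogger returns a logger that discards everything,
		// so callers never have to nil-check before logging.
		c.Logger = hclog.NewNullLogger()
	}
	c.Logger.Debug("starting docker check") // silently dropped here
}

func main() {
	(&dockerCheck{}).start()
}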
@@ -615,7 +640,10 @@ func (c *CheckDocker) check() {
 	var out string
 	status, b, err := c.doCheck()
 	if err != nil {
-		c.Logger.Printf("[DEBUG] agent: Check %q: %s", c.CheckID.String(), err)
+		c.Logger.Debug("Check failed",
+			"check", c.CheckID.String(),
+			"error", err,
+		)
 		out = err.Error()
 	} else {
 		// out is already limited to CheckBufSize since we're getting a

@@ -625,7 +653,10 @@ func (c *CheckDocker) check() {
 		if int(b.TotalWritten()) > len(out) {
 			out = fmt.Sprintf("Captured %d of %d bytes\n...\n%s", len(out), b.TotalWritten(), out)
 		}
-		c.Logger.Printf("[TRACE] agent: Check %q output: %s", c.CheckID.String(), out)
+		c.Logger.Trace("Check output",
+			"check", c.CheckID.String(),
+			"output", out,
+		)
 	}
 	c.StatusHandler.updateCheck(c.CheckID, status, out)
 }

@@ -657,10 +688,16 @@ func (c *CheckDocker) doCheck() (string, *circbuf.Buffer, error) {
 	case 0:
 		return api.HealthPassing, buf, nil
 	case 1:
-		c.Logger.Printf("[DEBUG] agent: Check %q failed with exit code: %d", c.CheckID.String(), exitCode)
+		c.Logger.Debug("Check failed",
+			"check", c.CheckID.String(),
+			"exit_code", exitCode,
+		)
 		return api.HealthWarning, buf, nil
 	default:
-		c.Logger.Printf("[DEBUG] agent: Check %q failed with exit code: %d", c.CheckID.String(), exitCode)
+		c.Logger.Debug("Check failed",
+			"check", c.CheckID.String(),
+			"exit_code", exitCode,
+		)
 		return api.HealthCritical, buf, nil
 	}
 }
@ -678,7 +715,7 @@ type CheckGRPC struct {
|
||||||
Interval time.Duration
|
Interval time.Duration
|
||||||
Timeout time.Duration
|
Timeout time.Duration
|
||||||
TLSClientConfig *tls.Config
|
TLSClientConfig *tls.Config
|
||||||
Logger *log.Logger
|
Logger hclog.Logger
|
||||||
StatusHandler *StatusHandler
|
StatusHandler *StatusHandler
|
||||||
|
|
||||||
probe *GrpcHealthProbe
|
probe *GrpcHealthProbe
|
||||||
|
@ -757,7 +794,7 @@ func (c *CheckGRPC) Stop() {
|
||||||
// reaches the given threshold.
|
// reaches the given threshold.
|
||||||
type StatusHandler struct {
|
type StatusHandler struct {
|
||||||
inner CheckNotifier
|
inner CheckNotifier
|
||||||
logger *log.Logger
|
logger hclog.Logger
|
||||||
successBeforePassing int
|
successBeforePassing int
|
||||||
successCounter int
|
successCounter int
|
||||||
failuresBeforeCritical int
|
failuresBeforeCritical int
|
||||||
|
@ -765,7 +802,7 @@ type StatusHandler struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStatusHandler set counters values to threshold in order to immediatly update status after first check.
|
// NewStatusHandler set counters values to threshold in order to immediatly update status after first check.
|
||||||
func NewStatusHandler(inner CheckNotifier, logger *log.Logger, successBeforePassing, failuresBeforeCritical int) *StatusHandler {
|
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeCritical int) *StatusHandler {
|
||||||
return &StatusHandler{
|
return &StatusHandler{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
inner: inner,
|
inner: inner,
|
||||||
|
@ -782,19 +819,32 @@ func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output stri
|
||||||
s.successCounter++
|
s.successCounter++
|
||||||
s.failuresCounter = 0
|
s.failuresCounter = 0
|
||||||
if s.successCounter >= s.successBeforePassing {
|
if s.successCounter >= s.successBeforePassing {
|
||||||
s.logger.Printf("[DEBUG] agent: Check %q is %q", checkID.String(), status)
|
s.logger.Debug("Check status updated",
|
||||||
|
"check", checkID.String(),
|
||||||
|
"status", status,
|
||||||
|
)
|
||||||
s.inner.UpdateCheck(checkID, status, output)
|
s.inner.UpdateCheck(checkID, status, output)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.logger.Printf("[WARN] agent: Check %q was %q but has not reached success threshold %d/%d", checkID.String(), status, s.successCounter, s.successBeforePassing)
|
s.logger.Warn("Check passed but has not reached success threshold",
|
||||||
|
"check", checkID.String(),
|
||||||
|
"status", status,
|
||||||
|
"success_count", s.successCounter,
|
||||||
|
"success_threshold", s.successBeforePassing,
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
s.failuresCounter++
|
s.failuresCounter++
|
||||||
s.successCounter = 0
|
s.successCounter = 0
|
||||||
if s.failuresCounter >= s.failuresBeforeCritical {
|
if s.failuresCounter >= s.failuresBeforeCritical {
|
||||||
s.logger.Printf("[WARN] agent: Check %q is now critical", checkID.String())
|
s.logger.Warn("Check is now critical", "check", checkID.String())
|
||||||
s.inner.UpdateCheck(checkID, status, output)
|
s.inner.UpdateCheck(checkID, status, output)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.logger.Printf("[WARN] agent: Check %q failed but has not reached failure threshold %d/%d", checkID.String(), s.failuresCounter, s.failuresBeforeCritical)
|
s.logger.Warn("Check failed but has not reached failure threshold",
|
||||||
|
"check", checkID.String(),
|
||||||
|
"status", status,
|
||||||
|
"failure_count", s.failuresCounter,
|
||||||
|
"failure_threshold", s.failuresBeforeCritical,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
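The whole file above follows the same mechanical translation: a level tag embedded in a Printf format string becomes a call on the matching hclog level method, and the interpolated values become key/value pairs. A minimal, self-contained sketch of that before/after shape (not lifted from the Consul source; the check name and error below are placeholders):

```go
package main

import (
	"errors"
	"log"
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	checkID := "mem"
	err := errors.New("connection refused")

	// Before: the level lives inside the format string and values are interpolated.
	std := log.New(os.Stderr, "", log.LstdFlags)
	std.Printf("[WARN] agent: Check %q socket connection failed: %s", checkID, err)

	// After: the level is a method and values are structured key/value pairs,
	// so a JSON sink can emit them as fields instead of flat text.
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent"})
	logger.Warn("Check socket connection failed",
		"check", checkID,
		"error", err,
	)
}
```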
@ -3,8 +3,6 @@ package checks
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"

@ -18,6 +16,7 @@ import (
"github.com/hashicorp/consul/agent/mock"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"

@ -44,7 +43,7 @@ func TestCheckMonitor_Script(t *testing.T) {
for _, tt := range tests {
t.Run(tt.status, func(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("foo", nil)

@ -85,9 +84,10 @@ func TestCheckMonitor_Args(t *testing.T) {
for _, tt := range tests {
t.Run(tt.status, func(t *testing.T) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

check := &CheckMonitor{
Notify: notif,
CheckID: cid,

@ -114,7 +114,7 @@ func TestCheckMonitor_Timeout(t *testing.T) {
func TestCheckMonitor_Timeout(t *testing.T) {
// t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("foo", nil)

@ -145,7 +145,7 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
func TestCheckMonitor_RandomStagger(t *testing.T) {
// t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("foo", nil)

@ -177,7 +177,7 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
func TestCheckMonitor_LimitOutput(t *testing.T) {
t.Parallel()
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

@ -204,13 +204,14 @@ func TestCheckTTL(t *testing.T) {
func TestCheckTTL(t *testing.T) {
// t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
logger := testutil.Logger(t)
cid := structs.NewCheckID("foo", nil)

check := &CheckTTL{
Notify: notif,
CheckID: cid,
TTL: 200 * time.Millisecond,
Logger: log.New(ioutil.Discard, uniqueID(), log.LstdFlags),
Logger: logger,
}
check.Start()
defer check.Stop()

@ -327,7 +328,7 @@ func TestCheckHTTP(t *testing.T) {
defer server.Close()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("foo", nil)

@ -369,7 +370,8 @@ func TestCheckHTTP_Proxied(t *testing.T) {
defer proxy.Close()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

@ -405,7 +407,7 @@ func TestCheckHTTP_NotProxied(t *testing.T) {
defer server.Close()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

@ -520,7 +522,7 @@ func TestCheckMaxOutputSize(t *testing.T) {
defer server.Close()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
maxOutputSize := 32
cid := structs.NewCheckID("bar", nil)

@ -558,7 +560,7 @@ func TestCheckHTTPTimeout(t *testing.T) {
defer server.Close()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("bar", nil)

@ -587,7 +589,7 @@ func TestCheckHTTPTimeout(t *testing.T) {
func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
t.Parallel()
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
cid := structs.NewCheckID("foo", nil)

check := &CheckHTTP{

@ -629,7 +631,7 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
}

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)

cid := structs.NewCheckID("skipverify_true", nil)

@ -667,9 +669,10 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
}

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("skipverify_false", nil)

check := &CheckHTTP{
CheckID: cid,
HTTP: server.URL,

@ -718,9 +721,10 @@ func mockTCPServer(network string) net.Listener {

func expectTCPStatus(t *testing.T, tcp string, status string) {
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

check := &CheckTCP{
CheckID: cid,
TCP: tcp,

@ -744,7 +748,7 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
t.Parallel()
cid := structs.NewCheckID("foo", nil)
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 2, 3)

// Set the initial status to passing after a single success

@ -786,7 +790,7 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
t.Parallel()
cid := structs.NewCheckID("foo", nil)
notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 2, 3)

// Set the initial status to passing after a single success

@ -1126,8 +1130,10 @@ func TestCheck_Docker(t *testing.T) {
}

notif, upd := mock.NewNotifyChan()
statusHandler := NewStatusHandler(notif, log.New(ioutil.Discard, uniqueID(), log.LstdFlags), 0, 0)
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0)
id := structs.NewCheckID("chk", nil)

check := &CheckDocker{
CheckID: id,
ScriptArgs: []string{"/health.sh"},
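Every test fixture in this file now asks the sdk testutil package for its logger instead of building a *log.Logger by hand. A sketch of the resulting fixture shape, using only the helpers already visible in this diff (assuming, as the diff does, that testutil.Logger(t) returns an hclog.Logger scoped to the test):

```go
func TestStatusHandler_Fixture(t *testing.T) {
	notif := mock.NewNotify()

	// Assumed: testutil.Logger(t) replaces the old
	// log.New(ioutil.Discard, uniqueID(), log.LstdFlags) construction.
	logger := testutil.Logger(t)

	statusHandler := NewStatusHandler(notif, logger, 0, 0)
	cid := structs.NewCheckID("foo", nil)

	// Drive one update through the handler; thresholds of 0 apply it immediately.
	statusHandler.updateCheck(cid, api.HealthPassing, "ok")
}
```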
@ -15,6 +15,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-hclog"

"google.golang.org/grpc"
"google.golang.org/grpc/health"

@ -107,9 +108,14 @@ func TestGRPC_Proxied(t *testing.T) {
t.Parallel()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := hclog.New(&hclog.LoggerOptions{
Name: uniqueID(),
Output: ioutil.Discard,
})

statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

check := &CheckGRPC{
CheckID: cid,
GRPC: "",

@ -136,9 +142,14 @@ func TestGRPC_NotProxied(t *testing.T) {
t.Parallel()

notif := mock.NewNotify()
logger := log.New(ioutil.Discard, uniqueID(), log.LstdFlags)
logger := hclog.New(&hclog.LoggerOptions{
Name: uniqueID(),
Output: ioutil.Discard,
})

statusHandler := NewStatusHandler(notif, logger, 0, 0)
cid := structs.NewCheckID("foo", nil)

check := &CheckGRPC{
CheckID: cid,
GRPC: server,
@ -911,6 +911,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime),
LeaveOnTerm: leaveOnTerm,
LogLevel: b.stringVal(c.LogLevel),
LogJSON: b.boolVal(c.LogJSON),
LogFile: b.stringVal(c.LogFile),
LogRotateBytes: b.intVal(c.LogRotateBytes),
LogRotateDuration: b.durationVal("log_rotate_duration", c.LogRotateDuration),

@ -235,6 +235,7 @@ type Config struct {
LeaveOnTerm *bool `json:"leave_on_terminate,omitempty" hcl:"leave_on_terminate" mapstructure:"leave_on_terminate"`
Limits Limits `json:"limits,omitempty" hcl:"limits" mapstructure:"limits"`
LogLevel *string `json:"log_level,omitempty" hcl:"log_level" mapstructure:"log_level"`
LogJSON *bool `json:"log_json,omitempty" hcl:"log_json" mapstructure:"log_json"`
LogFile *string `json:"log_file,omitempty" hcl:"log_file" mapstructure:"log_file"`
LogRotateDuration *string `json:"log_rotate_duration,omitempty" hcl:"log_rotate_duration" mapstructure:"log_rotate_duration"`
LogRotateBytes *int `json:"log_rotate_bytes,omitempty" hcl:"log_rotate_bytes" mapstructure:"log_rotate_bytes"`

@ -83,6 +83,7 @@ func AddFlags(fs *flag.FlagSet, f *Flags) {
add(&f.Config.StartJoinAddrsLAN, "join", "Address of an agent to join at start time. Can be specified multiple times.")
add(&f.Config.StartJoinAddrsWAN, "join-wan", "Address of an agent to join -wan at start time. Can be specified multiple times.")
add(&f.Config.LogLevel, "log-level", "Log level of the agent.")
add(&f.Config.LogJSON, "log-json", "Output logs in JSON format.")
add(&f.Config.LogFile, "log-file", "Path to the file the logs get written to")
add(&f.Config.LogRotateBytes, "log-rotate-bytes", "Maximum number of bytes that should be written to a log file")
add(&f.Config.LogRotateDuration, "log-rotate-duration", "Time after which log rotation needs to be performed")

@ -839,6 +839,12 @@ type RuntimeConfig struct {
// hcl: log_level = string
LogLevel string

// LogJSON controls whether to output logs as structured JSON. Defaults to false.
//
// hcl: log_json = (true|false)
// flag: -log-json
LogJSON bool

// LogFile is the path to the file where the logs get written to. Defaults to empty string.
//
// hcl: log_file = string

@ -541,6 +541,17 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
rt.DataDir = dataDir
},
},
{
desc: "-log-json",
args: []string{
`-log-json`,
`-data-dir=` + dataDir,
},
patch: func(rt *RuntimeConfig) {
rt.LogJSON = true
rt.DataDir = dataDir
},
},
{
desc: "-log-rotate-max-files",
args: []string{

@ -3865,6 +3876,7 @@ func TestFullConfig(t *testing.T) {
"kv_max_value_size": 1234567800000000
},
"log_level": "k1zo9Spt",
"log_json": true,
"max_query_time": "18237s",
"node_id": "AsUIlw99",
"node_meta": {

@ -4470,6 +4482,7 @@ func TestFullConfig(t *testing.T) {
kv_max_value_size = 1234567800000000
}
log_level = "k1zo9Spt"
log_json = true
max_query_time = "18237s"
node_id = "AsUIlw99"
node_meta {

@ -5163,6 +5176,7 @@ func TestFullConfig(t *testing.T) {
LeaveDrainTime: 8265 * time.Second,
LeaveOnTerm: true,
LogLevel: "k1zo9Spt",
LogJSON: true,
MaxQueryTime: 18237 * time.Second,
NodeID: types.NodeID("AsUIlw99"),
NodeMeta: map[string]string{"5mgGQMBk": "mJLtVMSG", "A7ynFMJB": "0Nx6RGab"},

@ -6034,6 +6048,7 @@ func TestSanitize(t *testing.T) {
"LeaveDrainTime": "0s",
"LeaveOnTerm": false,
"LogLevel": "",
"LogJSON": false,
"LogFile": "",
"LogRotateBytes": 0,
"LogRotateDuration": "0s",
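The new option travels through every layer shown above: a -log-json CLI flag, a log_json key in HCL/JSON configuration, and a LogJSON field on RuntimeConfig. What the option ultimately selects is hclog's JSON output mode; the snippet below only illustrates the difference between the two formats and is not code from this change (JSONFormat is hclog's own option, assumed here to be what LogJSON toggles downstream):

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Unstructured (default), roughly:
	//   2020-01-01T00:00:00.000Z [INFO]  agent: started HTTP server: address=127.0.0.1:8500
	text := hclog.New(&hclog.LoggerOptions{Name: "agent", Output: os.Stdout})
	text.Info("started HTTP server", "address", "127.0.0.1:8500")

	// JSON, roughly:
	//   {"@level":"info","@message":"started HTTP server","@module":"agent","address":"127.0.0.1:8500"}
	jsonLogger := hclog.New(&hclog.LoggerOptions{
		Name:       "agent",
		Output:     os.Stdout,
		JSONFormat: true,
	})
	jsonLogger.Info("started HTTP server", "address", "127.0.0.1:8500")
}
```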
@ -3,7 +3,8 @@ package ca
import (
"crypto/x509"
"errors"
"log"
"github.com/hashicorp/go-hclog"
)

//go:generate mockery -name Provider -inpkg

@ -161,6 +162,5 @@ type Provider interface {
// Consul logger to output diagnostic messages.
type NeedsLogger interface {
// SetLogger will pass a configured Logger to the provider.
// TODO(hclog) convert this to an hclog.Logger.
SetLogger(logger *log.Logger)
SetLogger(logger hclog.Logger)
}
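With the interface now taking an hclog.Logger, a CA provider implements NeedsLogger by storing the logger it is handed, typically re-rooting it under its own subsystem name the way the AWS and Consul providers below do. A hypothetical provider might look like this (ExampleProvider and the literal name string are illustrative, not part of this change):

```go
// ExampleProvider is a hypothetical CA provider that wants diagnostic logging.
type ExampleProvider struct {
	logger hclog.Logger
}

// SetLogger implements NeedsLogger.
func (p *ExampleProvider) SetLogger(logger hclog.Logger) {
	// Re-root the logger under this provider's own name so its output is
	// attributed to the provider rather than to whichever caller passed it in.
	p.logger = logger.ResetNamed("connect.ca.example")
}
```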
@ -5,7 +5,6 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"log"
"sync/atomic"
"time"

@ -17,6 +16,8 @@ import (

"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
)

const (

@ -73,12 +74,15 @@ type AWSProvider struct {
caCreated bool
rootPEM string
intermediatePEM string
logger *log.Logger
logger hclog.Logger
}

// SetLogger implements NeedsLogger
func (a *AWSProvider) SetLogger(l *log.Logger) {
func (a *AWSProvider) SetLogger(logger hclog.Logger) {
a.logger = l
a.logger = logger.
ResetNamed(logging.Connect).
Named(logging.CA).
Named(logging.AWS)
}

// Configure implements Provider

@ -221,7 +225,7 @@ func (a *AWSProvider) ensureCA() error {
Certificate: []byte(certPEM),
}

a.logger.Printf("[DEBUG] connect.ca.aws: uploading certificate for %s", a.arn)
a.logger.Debug("uploading certificate for ARN", "arn", a.arn)
_, err = a.client.ImportCertificateAuthorityCertificate(&input)
if err != nil {
return err

@ -296,7 +300,7 @@ func (a *AWSProvider) createPCA() error {
},
}

a.logger.Printf("[DEBUG] creating new PCA %s", commonName)
a.logger.Debug("creating new PCA", "common_name", commonName)
createOutput, err := a.client.CreateCertificateAuthority(&createInput)
if err != nil {
return err

@ -315,8 +319,7 @@ func (a *AWSProvider) createPCA() error {
}
}
if *describeOutput.CertificateAuthority.Status == acmpca.CertificateAuthorityStatusPendingCertificate {
a.logger.Printf("[DEBUG] connect.ca.aws: new PCA %s is ready"+
" to accept a certificate", newARN)
a.logger.Debug("new PCA is ready to accept a certificate", "pca", newARN)
a.arn = newARN
// We don't need to reload this ARN since we just created it and know what
// state it's in

@ -333,7 +336,7 @@ func (a *AWSProvider) getCACSR() (string, error) {
input := &acmpca.GetCertificateAuthorityCsrInput{
CertificateAuthorityArn: aws.String(a.arn),
}
a.logger.Printf("[DEBUG] connect.ca.aws: retrieving CSR for %s", a.arn)
a.logger.Debug("retrieving CSR for PCA", "pca", a.arn)
output, err := a.client.GetCertificateAuthorityCsr(input)
if err != nil {
return "", err

@ -413,13 +416,13 @@ func (a *AWSProvider) pollLoop(desc string, timeout time.Duration, f func() (boo
return "", fmt.Errorf("timeout after %s waiting for %s", elapsed, desc)
}

a.logger.Printf("[DEBUG] connect.ca.aws: %s pending"+
", waiting %s to check readiness", desc, wait)
a.logger.Debug(fmt.Sprintf("%s pending, waiting to check readiness", desc),
"wait_time", wait,
)
select {
case <-a.stopCh:
// Provider discarded
a.logger.Print("[WARN] connect.ca.aws: provider instance terminated"+
" while waiting for %s.", desc)
a.logger.Warn(fmt.Sprintf("provider instance terminated while waiting for %s.", desc))
return "", fmt.Errorf("provider terminated")
case <-time.After(wait):
// Continue looping...

@ -533,7 +536,7 @@ func (a *AWSProvider) SetIntermediate(intermediatePEM string, rootPEM string) er
Certificate: []byte(intermediatePEM),
CertificateChain: []byte(rootPEM),
}
a.logger.Printf("[DEBUG] uploading certificate for %s", a.arn)
a.logger.Debug("uploading certificate for PCA", "pca", a.arn)
_, err = a.client.ImportCertificateAuthorityCertificate(&input)
if err != nil {
return err

@ -594,8 +597,9 @@ func (a *AWSProvider) Sign(csr *x509.CertificateRequest) (string, error) {
return "", fmt.Errorf("AWS CA provider not fully Initialized")
}

a.logger.Printf("[DEBUG] connect.ca.aws: signing csr for %s",
csr.Subject.CommonName)
a.logger.Debug("signing csr for requester",
"requester", csr.Subject.CommonName,
)

return a.signCSRRaw(csr, LeafTemplateARN, a.config.LeafCertTTL)
}

@ -624,7 +628,7 @@ func (a *AWSProvider) disablePCA() error {
CertificateAuthorityArn: aws.String(a.arn),
Status: aws.String(acmpca.CertificateAuthorityStatusDisabled),
}
a.logger.Printf("[INFO] connect.ca.aws: disabling PCA %s", a.arn)
a.logger.Info("disabling PCA", "pca", a.arn)
_, err := a.client.UpdateCertificateAuthority(&input)
return err
}

@ -639,7 +643,7 @@ func (a *AWSProvider) deletePCA() error {
// possible (7 days).
PermanentDeletionTimeInDays: aws.Int64(7),
}
a.logger.Printf("[INFO] connect.ca.aws: deleting PCA %s", a.arn)
a.logger.Info("deleting PCA", "pca", a.arn)
_, err := a.client.DeleteCertificateAuthority(&input)
return err
}

@ -655,14 +659,18 @@ func (a *AWSProvider) Cleanup() error {
if err := a.disablePCA(); err != nil {
// Log the error but continue trying to delete as some errors may still
// allow that and this is best-effort delete anyway.
a.logger.Printf("[ERR] connect.ca.aws: failed to disable PCA %s: %s",
a.arn, err)
a.logger.Error("failed to disable PCA",
"pca", a.arn,
"error", err,
)
}
if err := a.deletePCA(); err != nil {
// Log the error but continue trying to delete as some errors may still
// allow that and this is best-effort delete anyway.
a.logger.Printf("[ERR] connect.ca.aws: failed to delete PCA %s: %s",
a.arn, err)
a.logger.Error("failed to delete PCA",
"pca", a.arn,
"error", err,
)
}
// Don't stall leader shutdown, non of the failures here are fatal.
return nil
@ -1,12 +1,12 @@
package ca

import (
"log"
"os"
"strconv"
"testing"

"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
)

@ -240,7 +240,8 @@ func TestAWSNoCrossSigning(t *testing.T) {

func testAWSProvider(t *testing.T, cfg ProviderConfig) *AWSProvider {
p := &AWSProvider{}
p.SetLogger(log.New(&testLogger{t}, "", log.LstdFlags))
logger := testutil.Logger(t)
p.SetLogger(logger)
require.NoError(t, p.Configure(cfg))
return p
}
@ -9,7 +9,6 @@ import (
"encoding/pem"
"errors"
"fmt"
"log"
"math/big"
"net/url"
"sync"

@ -18,6 +17,8 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
)

var ErrNotInitialized = errors.New("provider not initialized")

@ -30,7 +31,7 @@ type ConsulProvider struct {
clusterID string
isPrimary bool
spiffeID *connect.SpiffeIDSigning
logger *log.Logger
logger hclog.Logger

// testState is only used to test Consul leader's handling of providers that
// need to persist state. Consul provider actually manages it's state directly

@ -119,8 +120,10 @@ func (c *ConsulProvider) Configure(cfg ProviderConfig) error {
return err
}

c.logger.Printf("[DEBUG] consul CA provider configured ID=%s IsPrimary=%v",
c.id, c.isPrimary)
c.logger.Debug("consul CA provider configured",
"id", c.id,
"is_primary", c.isPrimary,
)

return nil
}

@ -669,8 +672,11 @@ func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error
}

// SetLogger implements the NeedsLogger interface so the provider can log important messages.
func (c *ConsulProvider) SetLogger(logger *log.Logger) {
func (c *ConsulProvider) SetLogger(logger hclog.Logger) {
c.logger = logger
c.logger = logger.
ResetNamed(logging.Connect).
Named(logging.CA).
Named(logging.Consul)
}

func (c *ConsulProvider) parseTestState(rawConfig map[string]interface{}, state map[string]string) {
@ -3,9 +3,9 @@ package ca
import (
"fmt"
"io/ioutil"
"log"

"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/go-testing-interface"
)

@ -70,6 +70,9 @@ func CASigningKeyTypeCases() []CASigningKeyTypes {
// SetLogger can be called again with another logger to capture logs.
func TestConsulProvider(t testing.T, d ConsulProviderStateDelegate) *ConsulProvider {
provider := &ConsulProvider{Delegate: d}
provider.SetLogger(log.New(ioutil.Discard, "", 0))
logger := hclog.New(&hclog.LoggerOptions{
Output: ioutil.Discard,
})
provider.SetLogger(logger)
return provider
}
@ -2,8 +2,6 @@ package consul

import (
"fmt"
"log"
"os"
"sort"
"sync"
"time"

@ -11,6 +9,8 @@ import (
metrics "github.com/armon/go-metrics"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"golang.org/x/sync/singleflight"
"golang.org/x/time/rate"
)

@ -154,7 +154,7 @@ func (e policyOrRoleTokenError) Error() string {
// ACLResolverConfig holds all the configuration necessary to create an ACLResolver
type ACLResolverConfig struct {
Config *Config
Logger *log.Logger
Logger hclog.Logger

// CacheConfig is a pass through configuration for ACL cache limits
CacheConfig *structs.ACLCachesConfig

@ -199,7 +199,7 @@ type ACLResolverConfig struct {
//
type ACLResolver struct {
config *Config
logger *log.Logger
logger hclog.Logger

delegate ACLResolverDelegate
aclConf *acl.Config

@ -231,7 +231,7 @@ func NewACLResolver(config *ACLResolverConfig) (*ACLResolver, error) {
}

if config.Logger == nil {
config.Logger = log.New(os.Stderr, "", log.LstdFlags)
config.Logger = hclog.New(&hclog.LoggerOptions{})
}

cache, err := structs.NewACLCaches(config.CacheConfig)

@ -253,7 +253,7 @@ func NewACLResolver(config *ACLResolverConfig) (*ACLResolver, error) {

return &ACLResolver{
config: config.Config,
logger: config.Logger,
logger: config.Logger.Named(logging.ACL),
delegate: config.Delegate,
aclConf: config.ACLConfig,
cache: cache,
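Callers construct the resolver the same way as before, but the logger they hand in is now an hclog.Logger, and NewACLResolver derives its own "acl"-named sub-logger from it via the Named(logging.ACL) call above. A rough sketch of the caller side (the field values other than Logger are placeholders; serverConfig is assumed to exist in the caller):

```go
logger := hclog.New(&hclog.LoggerOptions{
	Name:  "consul.server",
	Level: hclog.Debug,
})

resolver, err := NewACLResolver(&ACLResolverConfig{
	Config: serverConfig, // *Config, assumed to be built elsewhere
	Logger: logger,       // replaces the old *log.Logger field
	// CacheConfig, Delegate, ACLConfig, etc. as before.
})
if err != nil {
	logger.Error("failed to build ACL resolver", "error", err)
}
_ = resolver
```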
@ -782,7 +782,10 @@ func (r *ACLResolver) collectPoliciesForIdentity(identity structs.ACLIdentity, p
if policy != nil {
policies = append(policies, policy)
} else {
r.logger.Printf("[WARN] acl: policy not found for identity, policy=%q accessorID=%q", policyID, accessorID)
r.logger.Warn("policy not found for identity",
"policy", policyID,
"identity", identity.ID(),
"accessorID", accessorID)
}

continue

@ -880,7 +883,11 @@ func (r *ACLResolver) collectRolesForIdentity(identity structs.ACLIdentity, role
if identity != nil {
accessorID = identity.ID()
}
r.logger.Printf("[WARN] acl: role not found for identity, role=%q accessorID=%q", roleID, accessorID)
r.logger.Warn("role not found for identity",
"role", roleID,
"identity", identity.ID(),
"accessorID", accessorID,
)
}

continue

@ -1035,7 +1042,7 @@ func (r *ACLResolver) disableACLsWhenUpstreamDisabled(err error) error {
return err
}

r.logger.Printf("[DEBUG] acl: ACLs disabled on upstream servers, will check again after %s", r.config.ACLDisabledTTL)
r.logger.Debug("ACLs disabled on upstream servers, will retry", "retry_interval", r.config.ACLDisabledTTL)
r.disabledLock.Lock()
r.disabled = time.Now().Add(r.config.ACLDisabledTTL)
r.disabledLock.Unlock()

@ -1068,7 +1075,7 @@ func (r *ACLResolver) ResolveTokenToIdentityAndAuthorizer(token string) (structs
if err != nil {
r.disableACLsWhenUpstreamDisabled(err)
if IsACLRemoteError(err) {
r.logger.Printf("[ERR] consul.acl: %v", err)
r.logger.Error("Error resolving token", "error", err)
return &missingIdentity{reason: "primary-dc-down", token: token}, r.down, nil
}

@ -1087,7 +1094,7 @@ func (r *ACLResolver) ResolveTokenToIdentityAndAuthorizer(token string) (structs
authz, err = r.resolveEnterpriseDefaultsForIdentity(identity)
if err != nil {
if IsACLRemoteError(err) {
r.logger.Printf("[ERR] consul.acl: %v", err)
r.logger.Error("Error resolving identity defaults", "error", err)
return identity, r.down, nil
}
return nil, nil, err

@ -1136,14 +1143,14 @@ func (r *ACLResolver) GetMergedPolicyForToken(token string) (*acl.Policy, error)
// configured for the provided token.
type aclFilter struct {
authorizer acl.Authorizer
logger *log.Logger
logger hclog.Logger
enforceVersion8 bool
}

// newACLFilter constructs a new aclFilter.
func newACLFilter(authorizer acl.Authorizer, logger *log.Logger, enforceVersion8 bool) *aclFilter {
func newACLFilter(authorizer acl.Authorizer, logger hclog.Logger, enforceVersion8 bool) *aclFilter {
if logger == nil {
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = hclog.New(&hclog.LoggerOptions{})
}
return &aclFilter{
authorizer: authorizer,

@ -1195,7 +1202,7 @@ func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) {
continue
}

f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", check.CheckID)
f.logger.Debug("dropping check from result due to ACLs", "check", check.CheckID)
hc = append(hc[:i], hc[i+1:]...)
i--
}

@ -1211,7 +1218,7 @@ func (f *aclFilter) filterServices(services structs.Services, entMeta *structs.E
if f.allowService(svc, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc)
f.logger.Debug("dropping service from result due to ACLs", "service", svc)
delete(services, svc)
}
}

@ -1229,7 +1236,7 @@ func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) {
if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node)
f.logger.Debug("dropping node from result due to ACLs", "node", node.Node)
sn = append(sn[:i], sn[i+1:]...)
i--
}

@ -1255,7 +1262,7 @@ func (f *aclFilter) filterNodeServices(services **structs.NodeServices) {
if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svcName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID())
f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
delete((*services).Services, svcName)
}
}

@ -1282,7 +1289,7 @@ func (f *aclFilter) filterNodeServiceList(services **structs.NodeServiceList) {
if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svc.Service, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc.CompoundServiceID())
f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
svcs = append(svcs[:i], svcs[i+1:]...)
i--
modified = true

@ -1307,7 +1314,7 @@ func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) {
if f.allowNode(node.Node.Node, &authzContext) && f.allowService(node.Service.Service, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node.Node.Node)
f.logger.Debug("dropping node from result due to ACLs", "node", node.Node.Node)
csn = append(csn[:i], csn[i+1:]...)
i--
}

@ -1326,7 +1333,7 @@ func (f *aclFilter) filterSessions(sessions *structs.Sessions) {
if f.allowSession(session.Node, &entCtx) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping session %q from result due to ACLs", session.ID)
f.logger.Debug("dropping session from result due to ACLs", "session", session.ID)
s = append(s[:i], s[i+1:]...)
i--
}

@ -1345,7 +1352,7 @@ func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) {
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
f.logger.Debug("dropping node from result due to ACLs", "node", node)
c = append(c[:i], c[i+1:]...)
i--
}

@ -1359,7 +1366,7 @@ func (f *aclFilter) filterIntentions(ixns *structs.Intentions) {
ret := make(structs.Intentions, 0, len(*ixns))
for _, ixn := range *ixns {
if !ixn.CanRead(f.authorizer) {
f.logger.Printf("[DEBUG] consul: dropping intention %q from result due to ACLs", ixn.ID)
f.logger.Debug("dropping intention from result due to ACLs", "intention", ixn.ID)
continue
}

@ -1381,7 +1388,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
// Filter nodes
structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext)
if node := info.Node; !f.allowNode(node, &authzContext) {
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
f.logger.Debug("dropping node from result due to ACLs", "node", node)
nd = append(nd[:i], nd[i+1:]...)
i--
continue

@ -1394,7 +1401,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
if f.allowNode(info.Node, &authzContext) && f.allowService(svc, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", svc)
f.logger.Debug("dropping service from result due to ACLs", "service", svc)
info.Services = append(info.Services[:j], info.Services[j+1:]...)
j--
}

@ -1406,7 +1413,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) {
if f.allowNode(info.Node, &authzContext) && f.allowService(chk.ServiceName, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping check %q from result due to ACLs", chk.CheckID)
f.logger.Debug("dropping check from result due to ACLs", "check", chk.CheckID)
info.Checks = append(info.Checks[:j], info.Checks[j+1:]...)
j--
}

@ -1427,7 +1434,7 @@ func (f *aclFilter) filterNodes(nodes *structs.Nodes) {
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Printf("[DEBUG] consul: dropping node %q from result due to ACLs", node)
f.logger.Debug("dropping node from result due to ACLs", "node", node)
n = append(n[:i], n[i+1:]...)
i--
}

@ -1486,7 +1493,7 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) {
// token, otherwise see what the policy says.
prefix, ok := query.GetACLPrefix()
if !ok || f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow {
f.logger.Printf("[DEBUG] consul: dropping prepared query %q from result due to ACLs", query.ID)
f.logger.Debug("dropping prepared query from result due to ACLs", "query", query.ID)
continue
}

@ -1668,7 +1675,7 @@ func (f *aclFilter) filterServiceList(services *structs.ServiceList) {

if f.authorizer.ServiceRead(svc.Name, &authzContext) != acl.Allow {
sid := structs.NewServiceID(svc.Name, &svc.EnterpriseMeta)
f.logger.Printf("[DEBUG] consul: dropping service %q from result due to ACLs", sid.String())
f.logger.Debug("dropping service from result due to ACLs", "service", sid.String())
continue
}
@ -49,7 +49,7 @@ func (c *Client) monitorACLMode() {
}

if canUpgrade {
c.logger.Printf("[DEBUG] acl: transition out of legacy ACL mode")
c.logger.Debug("transitioned out of legacy ACL mode")
atomic.StoreInt32(&c.useNewACLs, 1)
lib.UpdateSerfTag(c.serf, "acls", string(structs.ACLModeEnabled))
return
@ -17,6 +17,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
uuid "github.com/hashicorp/go-uuid"
)

@ -38,7 +39,8 @@ var (

// ACL endpoint is used to manipulate ACLs
type ACL struct {
srv *Server
logger hclog.Logger
}

// fileBootstrapResetIndex retrieves the reset index specified by the administrator from

@ -65,7 +67,10 @@ func (a *ACL) fileBootstrapResetIndex() uint64 {
raw, err := ioutil.ReadFile(path)
if err != nil {
if !os.IsNotExist(err) {
a.srv.logger.Printf("[ERR] acl.bootstrap: failed to read %q: %v", path, err)
a.logger.Error("bootstrap: failed to read path",
"path", path,
"error", err,
)
}
return 0
}

@ -73,18 +78,18 @@ func (a *ACL) fileBootstrapResetIndex() uint64 {
// Attempt to parse the file
var resetIdx uint64
if _, err := fmt.Sscanf(string(raw), "%d", &resetIdx); err != nil {
a.srv.logger.Printf("[ERR] acl.bootstrap: failed to parse %q: %v", path, err)
a.logger.Error("failed to parse bootstrap reset index path", "path", path, "error", err)
return 0
}

// Return the reset index
a.srv.logger.Printf("[DEBUG] acl.bootstrap: parsed %q: reset index %d", path, resetIdx)
a.logger.Debug("parsed bootstrap reset index path", "path", path, "reset_index", resetIdx)
return resetIdx
}

func (a *ACL) removeBootstrapResetFile() {
if err := os.Remove(filepath.Join(a.srv.config.DataDir, aclBootstrapReset)); err != nil {
a.srv.logger.Printf("[WARN] acl.bootstrap: failed to remove bootstrap file: %v", err)
a.logger.Warn("failed to remove bootstrap file", "error", err)
}
}

@ -182,7 +187,7 @@ func (a *ACL) BootstrapTokens(args *structs.DCSpecificRequest, reply *structs.AC
*reply = *token
}

a.srv.logger.Printf("[INFO] consul.acl: ACL bootstrap completed")
a.logger.Info("ACL bootstrap completed")
return nil
}
@ -65,10 +65,10 @@ func (a *ACL) Bootstrap(args *structs.DCSpecificRequest, reply *structs.ACL) err
|
||||||
default:
|
default:
|
||||||
// Just log this, since it looks like the bootstrap may have
|
// Just log this, since it looks like the bootstrap may have
|
||||||
// completed.
|
// completed.
|
||||||
a.srv.logger.Printf("[ERR] consul.acl: Unexpected response during bootstrap: %T", v)
|
a.logger.Error("Unexpected response during bootstrap", "type", fmt.Sprintf("%T", v))
|
||||||
}
|
}
|
||||||
|
|
||||||
a.srv.logger.Printf("[INFO] consul.acl: ACL bootstrap completed")
|
a.logger.Info("ACL bootstrap completed")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -131,7 +131,7 @@ func aclApplyInternal(srv *Server, args *structs.ACLRequest, reply *string) erro
|
||||||
// Apply the update
|
// Apply the update
|
||||||
resp, err := srv.raftApply(structs.ACLRequestType, args)
|
resp, err := srv.raftApply(structs.ACLRequestType, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
srv.logger.Printf("[ERR] consul.acl: Apply failed: %v", err)
|
srv.logger.Error("Raft apply failed", "acl_op", args.Op, "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if respErr, ok := resp.(error); ok {
|
if respErr, ok := resp.(error); ok {
|
||||||
|
|
|
@@ -3,10 +3,9 @@
 package consul

 import (
-"log"
-
 "github.com/hashicorp/consul/acl"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/go-hclog"
 )

 // EnterpriseACLResolverDelegate stub

@@ -16,7 +15,7 @@ func (s *Server) replicationEnterpriseMeta() *structs.EnterpriseMeta {
 return structs.ReplicationEnterpriseMeta()
 }

-func newACLConfig(*log.Logger) *acl.Config {
+func newACLConfig(hclog.Logger) *acl.Config {
 return &acl.Config{
 WildcardName: structs.WildcardSpecifier,
 }

@@ -9,6 +9,7 @@ import (

 metrics "github.com/armon/go-metrics"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/go-hclog"
 )

 const (

@@ -263,7 +264,7 @@ func (s *Server) deleteLocalACLType(ctx context.Context, tr aclTypeReplicator, d
 return false, nil
 }

-func (s *Server) updateLocalACLType(ctx context.Context, tr aclTypeReplicator) (bool, error) {
+func (s *Server) updateLocalACLType(ctx context.Context, logger hclog.Logger, tr aclTypeReplicator) (bool, error) {
 ticker := time.NewTicker(time.Second / time.Duration(s.config.ACLReplicationApplyLimit))
 defer ticker.Stop()

@@ -289,12 +290,10 @@ func (s *Server) updateLocalACLType(ctx context.Context, tr aclTypeReplicator) (
 if err != nil {
 return false, fmt.Errorf("Failed to apply %s upserts: %v", tr.SingularNoun(), err)
 }
-s.logger.Printf(
-"[DEBUG] acl: %s replication - upserted 1 batch with %d %s of size %d",
-tr.SingularNoun(),
-batchEnd-batchStart,
-tr.PluralNoun(),
-batchSize,
+logger.Debug(
+"acl replication - upserted batch",
+"number_upserted", batchEnd-batchStart,
+"batch_size", batchSize,
 )

 // items[batchEnd] wasn't include as the slicing doesn't include the element at the stop index
@@ -353,28 +352,28 @@ func (s *Server) fetchACLTokens(lastRemoteIndex uint64) (*structs.ACLTokenListRe
 return &response, nil
 }

-func (s *Server) replicateACLTokens(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
+func (s *Server) replicateACLTokens(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error) {
 tr := &aclTokenReplicator{}
-return s.replicateACLType(ctx, tr, lastRemoteIndex)
+return s.replicateACLType(ctx, logger, tr, lastRemoteIndex)
 }

-func (s *Server) replicateACLPolicies(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
+func (s *Server) replicateACLPolicies(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error) {
 tr := &aclPolicyReplicator{}
-return s.replicateACLType(ctx, tr, lastRemoteIndex)
+return s.replicateACLType(ctx, logger, tr, lastRemoteIndex)
 }

-func (s *Server) replicateACLRoles(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
+func (s *Server) replicateACLRoles(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error) {
 tr := &aclRoleReplicator{}
-return s.replicateACLType(ctx, tr, lastRemoteIndex)
+return s.replicateACLType(ctx, logger, tr, lastRemoteIndex)
 }

-func (s *Server) replicateACLType(ctx context.Context, tr aclTypeReplicator, lastRemoteIndex uint64) (uint64, bool, error) {
+func (s *Server) replicateACLType(ctx context.Context, logger hclog.Logger, tr aclTypeReplicator, lastRemoteIndex uint64) (uint64, bool, error) {
 lenRemote, remoteIndex, err := tr.FetchRemote(s, lastRemoteIndex)
 if err != nil {
 return 0, false, fmt.Errorf("failed to retrieve remote ACL %s: %v", tr.PluralNoun(), err)
 }

-s.logger.Printf("[DEBUG] acl: finished fetching %s: %d", tr.PluralNoun(), lenRemote)
+logger.Debug("finished fetching acls", "amount", lenRemote)

 // Need to check if we should be stopping. This will be common as the fetching process is a blocking
 // RPC which could have been hanging around for a long time and during that time leadership could

@@ -400,39 +399,34 @@ func (s *Server) replicateACLType(ctx context.Context, tr aclTypeReplicator, las
 // the remote side was rebuilt and we should do a full sync since we
 // can't make any assumptions about what's going on.
 if remoteIndex < lastRemoteIndex {
-s.logger.Printf(
-"[WARN] consul: ACL %s replication remote index moved backwards (%d to %d), forcing a full ACL %s sync",
-tr.SingularNoun(),
-lastRemoteIndex,
-remoteIndex,
-tr.SingularNoun(),
+logger.Warn(
+"ACL replication remote index moved backwards, forcing a full ACL sync",
+"from", lastRemoteIndex,
+"to", remoteIndex,
 )
 lastRemoteIndex = 0
 }

-s.logger.Printf(
-"[DEBUG] acl: %s replication - local: %d, remote: %d",
-tr.SingularNoun(),
-lenLocal,
-lenRemote,
+logger.Debug(
+"acl replication",
+"local", lenLocal,
+"remote", lenRemote,
 )
 // Calculate the changes required to bring the state into sync and then apply them.
 res := diffACLType(tr, lastRemoteIndex)
 if res.LocalSkipped > 0 || res.RemoteSkipped > 0 {
-s.logger.Printf(
-"[DEBUG] acl: %s replication - deletions: %d, updates: %d, skipped: %d, skippedRemote: %d",
-tr.SingularNoun(),
-len(res.LocalDeletes),
-len(res.LocalUpserts),
-res.LocalSkipped,
-res.RemoteSkipped,
+logger.Debug(
+"acl replication",
+"deletions", len(res.LocalDeletes),
+"updates", len(res.LocalUpserts),
+"skipped", res.LocalSkipped,
+"skipped_remote", res.RemoteSkipped,
 )
 } else {
-s.logger.Printf(
-"[DEBUG] acl: %s replication - deletions: %d, updates: %d",
-tr.SingularNoun(),
-len(res.LocalDeletes),
-len(res.LocalUpserts),
+logger.Debug(
+"acl replication",
+"deletions", len(res.LocalDeletes),
+"updates", len(res.LocalUpserts),
 )
 }
@@ -443,18 +437,16 @@ func (s *Server) replicateACLType(ctx context.Context, tr aclTypeReplicator, las
 } else if err != nil {
 return 0, false, fmt.Errorf("failed to retrieve ACL %s updates: %v", tr.SingularNoun(), err)
 }
-s.logger.Printf(
-"[DEBUG] acl: %s replication - downloaded %d %s",
-tr.SingularNoun(),
-lenUpdated,
-tr.PluralNoun(),
+logger.Debug(
+"acl replication - downloaded updates",
+"amount", lenUpdated,
 )
 }

 if len(res.LocalDeletes) > 0 {
-s.logger.Printf(
-"[DEBUG] acl: %s replication - performing deletions",
-tr.SingularNoun(),
+logger.Debug(
+"acl replication - performing deletions",
+"amount", len(res.LocalDeletes),
 )

 exit, err := s.deleteLocalACLType(ctx, tr, res.LocalDeletes)

@@ -464,19 +456,19 @@ func (s *Server) replicateACLType(ctx context.Context, tr aclTypeReplicator, las
 if err != nil {
 return 0, false, fmt.Errorf("failed to delete local ACL %s: %v", tr.PluralNoun(), err)
 }
-s.logger.Printf("[DEBUG] acl: %s replication - finished deletions", tr.SingularNoun())
+logger.Debug("acl replication - finished deletions")
 }

 if len(res.LocalUpserts) > 0 {
-s.logger.Printf("[DEBUG] acl: %s replication - performing updates", tr.SingularNoun())
+logger.Debug("acl replication - performing updates")
-exit, err := s.updateLocalACLType(ctx, tr)
+exit, err := s.updateLocalACLType(ctx, logger, tr)
 if exit {
 return 0, true, nil
 }
 if err != nil {
 return 0, false, fmt.Errorf("failed to update local ACL %s: %v", tr.PluralNoun(), err)
 }
-s.logger.Printf("[DEBUG] acl: %s replication - finished updates", tr.SingularNoun())
+logger.Debug("acl replication - finished updates")
 }

 // Return the index we got back from the remote side, since we've synced
@@ -8,6 +8,7 @@ import (

 metrics "github.com/armon/go-metrics"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/go-hclog"
 )

 // aclIterator simplifies the algorithm below by providing a basic iterator that

@@ -218,7 +219,7 @@ func (s *Server) updateLocalLegacyACLs(changes structs.ACLRequests, ctx context.
 // a remote ACL datacenter to local state. If there's any error, this will return
 // 0 for the lastRemoteIndex, which will cause us to immediately do a full sync
 // next time.
-func (s *Server) replicateLegacyACLs(lastRemoteIndex uint64, ctx context.Context) (uint64, bool, error) {
+func (s *Server) replicateLegacyACLs(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error) {
 remote, err := s.fetchRemoteLegacyACLs(lastRemoteIndex)
 if err != nil {
 return 0, false, fmt.Errorf("failed to retrieve remote ACLs: %v", err)

@@ -248,7 +249,11 @@ func (s *Server) replicateLegacyACLs(lastRemoteIndex uint64, ctx context.Context
 // the remote side was rebuilt and we should do a full sync since we
 // can't make any assumptions about what's going on.
 if remote.QueryMeta.Index < lastRemoteIndex {
-s.logger.Printf("[WARN] consul: Legacy ACL replication remote index moved backwards (%d to %d), forcing a full ACL sync", lastRemoteIndex, remote.QueryMeta.Index)
+logger.Warn(
+"Legacy ACL replication remote index moved backwards, forcing a full ACL sync",
+"from", lastRemoteIndex,
+"to", remote.QueryMeta.Index,
+)
 lastRemoteIndex = 0
 }
@@ -622,7 +622,7 @@ func newTestACLResolver(t *testing.T, delegate ACLResolverDelegate, cb func(*ACL
 config.ACLDownPolicy = "extend-cache"
 rconf := &ACLResolverConfig{
 Config: config,
-Logger: testutil.TestLoggerWithName(t, t.Name()),
+Logger: testutil.LoggerWithName(t, t.Name()),
 CacheConfig: &structs.ACLCachesConfig{
 Identities: 4,
 Policies: 4,

@@ -18,12 +18,12 @@ func (s *Server) reapExpiredTokens(ctx context.Context) error {

 if s.LocalTokensEnabled() {
 if _, err := s.reapExpiredLocalACLTokens(); err != nil {
-s.logger.Printf("[ERR] acl: error reaping expired local ACL tokens: %v", err)
+s.logger.Error("error reaping expired local ACL tokens", "error", err)
 }
 }
 if s.InACLDatacenter() {
 if _, err := s.reapExpiredGlobalACLTokens(); err != nil {
-s.logger.Printf("[ERR] acl: error reaping expired global ACL tokens: %v", err)
+s.logger.Error("error reaping expired global ACL tokens", "error", err)
 }
 }
 }

@@ -98,7 +98,10 @@ func (s *Server) reapExpiredACLTokens(local, global bool) (int, error) {
 secretIDs = append(secretIDs, token.SecretID)
 }

-s.logger.Printf("[INFO] acl: deleting %d expired %s tokens", len(req.TokenIDs), locality)
+s.logger.Info("deleting expired ACL tokens",
+"amount", len(req.TokenIDs),
+"locality", locality,
+)
 resp, err := s.raftApply(structs.ACLTokenDeleteRequestType, &req)
 if err != nil {
 return 0, fmt.Errorf("Failed to apply token expiration deletions: %v", err)
@@ -2,7 +2,6 @@ package consul

 import (
 "fmt"
-"log"
 "net"
 "strings"
 "time"

@@ -10,6 +9,7 @@ import (
 "github.com/hashicorp/consul/agent/connect"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/go-hclog"
 "github.com/miekg/dns"
 )

@@ -102,7 +102,7 @@ func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token strin
 for _, s := range servers {
 ips, err := resolveAddr(s, c.logger)
 if err != nil {
-c.logger.Printf("[WARN] agent: AutoEncrypt resolveAddr failed: %v", err)
+c.logger.Warn("AutoEncrypt resolveAddr failed", "error", err)
 continue
 }

@@ -112,7 +112,7 @@ func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token strin
 if err = c.connPool.RPC(c.config.Datacenter, &addr, 0, "AutoEncrypt.Sign", true, &args, &reply); err == nil {
 return &reply, pkPEM, nil
 } else {
-c.logger.Printf("[WARN] agent: AutoEncrypt failed: %v", err)
+c.logger.Warn("AutoEncrypt failed", "error", err)
 }
 }
 }

@@ -120,7 +120,7 @@ func (c *Client) RequestAutoEncryptCerts(servers []string, port int, token strin

 delay := lib.RandomStagger(retryJitterWindow)
 interval := (time.Duration(attempts) * delay) + delay
-c.logger.Printf("[WARN] agent: retrying AutoEncrypt in %v", interval)
+c.logger.Warn("retrying AutoEncrypt", "retry_interval", interval)
 select {
 case <-time.After(interval):
 continue
@@ -137,7 +137,7 @@ func missingPortError(host string, err error) bool {
 }

 // resolveAddr is used to resolve the host into IPs and error.
-func resolveAddr(rawHost string, logger *log.Logger) ([]net.IP, error) {
+func resolveAddr(rawHost string, logger hclog.Logger) ([]net.IP, error) {
 host, _, err := net.SplitHostPort(rawHost)
 if err != nil {
 // In case we encounter this error, we proceed with the

@@ -158,7 +158,7 @@ func resolveAddr(rawHost string, logger *log.Logger) ([]net.IP, error) {
 // hosts to join. If this fails it's not fatal since this isn't a standard
 // way to query DNS, and we have a fallback below.
 if ips, err := tcpLookupIP(host, logger); err != nil {
-logger.Printf("[DEBUG] agent: TCP-first lookup failed for '%s', falling back to UDP: %s", host, err)
+logger.Debug("TCP-first lookup failed for host, falling back to UDP", "host", host, "error", err)
 } else if len(ips) > 0 {
 return ips, nil
 }

@@ -179,7 +179,7 @@ func resolveAddr(rawHost string, logger *log.Logger) ([]net.IP, error) {
 // Consul's. By doing the TCP lookup directly, we get the best chance for the
 // largest list of hosts to join. Since joins are relatively rare events, it's ok
 // to do this rather expensive operation.
-func tcpLookupIP(host string, logger *log.Logger) ([]net.IP, error) {
+func tcpLookupIP(host string, logger hclog.Logger) ([]net.IP, error) {
 // Don't attempt any TCP lookups against non-fully qualified domain
 // names, since those will likely come from the resolv.conf file.
 if !strings.Contains(host, ".") {

@@ -218,7 +218,7 @@ func tcpLookupIP(host string, logger *log.Logger) ([]net.IP, error) {
 case (*dns.AAAA):
 ips = append(ips, rr.AAAA)
 case (*dns.CNAME):
-logger.Printf("[DEBUG] agent: Ignoring CNAME RR in TCP-first answer for '%s'", host)
+logger.Debug("Ignoring CNAME RR in TCP-first answer for host", "host", host)
 }
 }
 return ips, nil
@@ -1,20 +1,23 @@
 package consul

 import (
-"log"
 "net"
 "os"
 "testing"
 "time"

+"github.com/hashicorp/consul/sdk/testutil"
+"github.com/hashicorp/go-hclog"
 "github.com/stretchr/testify/require"
 )

 func TestAutoEncrypt_resolveAddr(t *testing.T) {
 type args struct {
 rawHost string
-logger *log.Logger
+logger hclog.Logger
 }
+logger := testutil.Logger(t)

 tests := []struct {
 name string
 args args
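The test above switches from building a log.New(...) per table entry to one shared hclog logger. A minimal, self-contained sketch of the same idea using only go-hclog (testutil.Logger is Consul's own helper; newTestLogger below is an illustrative stand-in, not that helper):

package example

import (
	"os"
	"testing"

	"github.com/hashicorp/go-hclog"
)

// newTestLogger is a hypothetical stand-in for Consul's testutil.Logger:
// it returns an hclog.Logger that writes to stderr and is tagged with the
// test name so interleaved output stays attributable.
func newTestLogger(t *testing.T) hclog.Logger {
	return hclog.New(&hclog.LoggerOptions{
		Name:   t.Name(),
		Level:  hclog.Debug,
		Output: os.Stderr,
	})
}

func TestSomething(t *testing.T) {
	logger := newTestLogger(t)
	logger.Debug("starting test case", "case", "host without port")
}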
@@ -25,7 +28,7 @@ func TestAutoEncrypt_resolveAddr(t *testing.T) {
 name: "host without port",
 args: args{
 "127.0.0.1",
-log.New(os.Stderr, "", log.LstdFlags),
+logger,
 },
 ips: []net.IP{net.IPv4(127, 0, 0, 1)},
 wantErr: false,

@@ -34,7 +37,7 @@ func TestAutoEncrypt_resolveAddr(t *testing.T) {
 name: "host with port",
 args: args{
 "127.0.0.1:1234",
-log.New(os.Stderr, "", log.LstdFlags),
+logger,
 },
 ips: []net.IP{net.IPv4(127, 0, 0, 1)},
 wantErr: false,

@@ -43,7 +46,7 @@ func TestAutoEncrypt_resolveAddr(t *testing.T) {
 name: "host with broken port",
 args: args{
 "127.0.0.1:xyz",
-log.New(os.Stderr, "", log.LstdFlags),
+logger,
 },
 ips: []net.IP{net.IPv4(127, 0, 0, 1)},
 wantErr: false,

@@ -52,7 +55,7 @@ func TestAutoEncrypt_resolveAddr(t *testing.T) {
 name: "not an address",
 args: args{
 "abc",
-log.New(os.Stderr, "", log.LstdFlags),
+logger,
 },
 ips: nil,
 wantErr: true,
@@ -3,15 +3,15 @@ package autopilot

 import (
 "context"
 "fmt"
-"log"
+"github.com/hashicorp/consul/logging"
+"github.com/hashicorp/go-hclog"
+"github.com/hashicorp/go-version"
+"github.com/hashicorp/raft"
+"github.com/hashicorp/serf/serf"
 "net"
 "strconv"
 "sync"
 "time"

-"github.com/hashicorp/go-version"
-"github.com/hashicorp/raft"
-"github.com/hashicorp/serf/serf"
 )

 // Delegate is the interface for the Autopilot mechanism

@@ -30,7 +30,7 @@ type Delegate interface {
 // quorum using server health information along with updates from Serf gossip.
 // For more information, see https://www.consul.io/docs/guides/autopilot.html
 type Autopilot struct {
-logger *log.Logger
+logger hclog.Logger
 delegate Delegate

 interval time.Duration

@@ -54,9 +54,9 @@ type ServerInfo struct {
 Status serf.MemberStatus
 }

-func NewAutopilot(logger *log.Logger, delegate Delegate, interval, healthInterval time.Duration) *Autopilot {
+func NewAutopilot(logger hclog.Logger, delegate Delegate, interval, healthInterval time.Duration) *Autopilot {
 return &Autopilot{
-logger: logger,
+logger: logger.Named(logging.Autopilot),
 delegate: delegate,
 interval: interval,
 healthInterval: healthInterval,
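NewAutopilot now takes an hclog.Logger and immediately derives a named child, so every autopilot line is tagged without callers having to pre-scope the logger. A small sketch of that constructor pattern in isolation, assuming only go-hclog; the subsystem string and the nil fallback are illustrative additions, not part of the Consul change:

package example

import "github.com/hashicorp/go-hclog"

type Autopilot struct {
	logger hclog.Logger
}

// NewAutopilot shows the "accept a logger, scope it on entry" pattern:
// callers hand in whatever logger they have, and the component tags its
// own output by deriving a named child once, in the constructor.
func NewAutopilot(logger hclog.Logger) *Autopilot {
	if logger == nil {
		logger = hclog.Default() // illustrative fallback, not in the original
	}
	return &Autopilot{
		logger: logger.Named("autopilot"),
	}
}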
@ -111,15 +111,15 @@ func (a *Autopilot) run() {
|
||||||
return
|
return
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
if err := a.promoteServers(); err != nil {
|
if err := a.promoteServers(); err != nil {
|
||||||
a.logger.Printf("[ERR] autopilot: Error promoting servers: %v", err)
|
a.logger.Error("Error promoting servers", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := a.pruneDeadServers(); err != nil {
|
if err := a.pruneDeadServers(); err != nil {
|
||||||
a.logger.Printf("[ERR] autopilot: Error checking for dead servers to remove: %s", err)
|
a.logger.Error("Error checking for dead servers to remove", "error", err)
|
||||||
}
|
}
|
||||||
case <-a.removeDeadCh:
|
case <-a.removeDeadCh:
|
||||||
if err := a.pruneDeadServers(); err != nil {
|
if err := a.pruneDeadServers(); err != nil {
|
||||||
a.logger.Printf("[ERR] autopilot: Error checking for dead servers to remove: %s", err)
|
a.logger.Error("Error checking for dead servers to remove", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -214,7 +214,7 @@ func (a *Autopilot) pruneDeadServers() error {
|
||||||
for _, member := range serfLAN.Members() {
|
for _, member := range serfLAN.Members() {
|
||||||
server, err := a.delegate.IsServer(member)
|
server, err := a.delegate.IsServer(member)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[INFO] autopilot: Error parsing server info for %q: %s", member.Name, err)
|
a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if server != nil {
|
if server != nil {
|
||||||
|
@ -227,7 +227,7 @@ func (a *Autopilot) pruneDeadServers() error {
|
||||||
if member.Status == serf.StatusFailed {
|
if member.Status == serf.StatusFailed {
|
||||||
// If the node is a nonvoter, we can remove it immediately.
|
// If the node is a nonvoter, we can remove it immediately.
|
||||||
if found && s.Suffrage == raft.Nonvoter {
|
if found && s.Suffrage == raft.Nonvoter {
|
||||||
a.logger.Printf("[INFO] autopilot: Attempting removal of failed server node %q", member.Name)
|
a.logger.Info("Attempting removal of failed server node", "name", member.Name)
|
||||||
go serfLAN.RemoveFailedNode(member.Name)
|
go serfLAN.RemoveFailedNode(member.Name)
|
||||||
if serfWAN != nil {
|
if serfWAN != nil {
|
||||||
go serfWAN.RemoveFailedNode(member.Name)
|
go serfWAN.RemoveFailedNode(member.Name)
|
||||||
|
@ -248,12 +248,12 @@ func (a *Autopilot) pruneDeadServers() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok, msg := canRemoveServers(NumPeers(raftConfig), int(conf.MinQuorum), deadServers); !ok {
|
if ok, msg := canRemoveServers(NumPeers(raftConfig), int(conf.MinQuorum), deadServers); !ok {
|
||||||
a.logger.Printf("[DEBUG] autopilot: Failed to remove dead servers: %s.", msg)
|
a.logger.Debug("Failed to remove dead servers", "error", msg)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, node := range failed {
|
for _, node := range failed {
|
||||||
a.logger.Printf("[INFO] autopilot: Attempting removal of failed server node %q", node.Name)
|
a.logger.Info("Attempting removal of failed server node", "name", node.Name)
|
||||||
go serfLAN.RemoveFailedNode(node.Name)
|
go serfLAN.RemoveFailedNode(node.Name)
|
||||||
if serfWAN != nil {
|
if serfWAN != nil {
|
||||||
go serfWAN.RemoveFailedNode(fmt.Sprintf("%s.%s", node.Name, node.Tags["dc"]))
|
go serfWAN.RemoveFailedNode(fmt.Sprintf("%s.%s", node.Name, node.Tags["dc"]))
|
||||||
|
@ -266,7 +266,7 @@ func (a *Autopilot) pruneDeadServers() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, raftServer := range staleRaftServers {
|
for _, raftServer := range staleRaftServers {
|
||||||
a.logger.Printf("[INFO] autopilot: Attempting removal of stale %s", fmtServer(raftServer))
|
a.logger.Info("Attempting removal of stale server", "server", fmtServer(raftServer))
|
||||||
var future raft.Future
|
var future raft.Future
|
||||||
if minRaftProtocol >= 2 {
|
if minRaftProtocol >= 2 {
|
||||||
future = raftNode.RemoveServer(raftServer.ID, 0, 0)
|
future = raftNode.RemoveServer(raftServer.ID, 0, 0)
|
||||||
|
@ -334,7 +334,7 @@ func (a *Autopilot) handlePromotions(promotions []raft.Server) error {
|
||||||
// to promote early than remove early, so by promoting as soon as
|
// to promote early than remove early, so by promoting as soon as
|
||||||
// possible we have chosen that as the solution here.
|
// possible we have chosen that as the solution here.
|
||||||
for _, server := range promotions {
|
for _, server := range promotions {
|
||||||
a.logger.Printf("[INFO] autopilot: Promoting %s to voter", fmtServer(server))
|
a.logger.Info("Promoting server to voter", "server", fmtServer(server))
|
||||||
addFuture := a.delegate.Raft().AddVoter(server.ID, server.Address, 0, 0)
|
addFuture := a.delegate.Raft().AddVoter(server.ID, server.Address, 0, 0)
|
||||||
if err := addFuture.Error(); err != nil {
|
if err := addFuture.Error(); err != nil {
|
||||||
return fmt.Errorf("failed to add raft peer: %v", err)
|
return fmt.Errorf("failed to add raft peer: %v", err)
|
||||||
|
@ -365,7 +365,7 @@ func (a *Autopilot) serverHealthLoop() {
|
||||||
return
|
return
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
if err := a.updateClusterHealth(); err != nil {
|
if err := a.updateClusterHealth(); err != nil {
|
||||||
a.logger.Printf("[ERR] autopilot: Error updating cluster health: %s", err)
|
a.logger.Error("Error updating cluster health", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -399,7 +399,7 @@ func (a *Autopilot) updateClusterHealth() error {
|
||||||
|
|
||||||
server, err := a.delegate.IsServer(member)
|
server, err := a.delegate.IsServer(member)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.logger.Printf("[INFO] autopilot: Error parsing server info for %q: %s", member.Name, err)
|
a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if server != nil {
|
if server != nil {
|
||||||
|
@ -452,7 +452,7 @@ func (a *Autopilot) updateClusterHealth() error {
|
||||||
health.Version = parts.Build.String()
|
health.Version = parts.Build.String()
|
||||||
if stats, ok := fetchedStats[string(server.ID)]; ok {
|
if stats, ok := fetchedStats[string(server.ID)]; ok {
|
||||||
if err := a.updateServerHealth(&health, parts, stats, autopilotConf, targetLastIndex); err != nil {
|
if err := a.updateServerHealth(&health, parts, stats, autopilotConf, targetLastIndex); err != nil {
|
||||||
a.logger.Printf("[WARN] autopilot: Error updating server %s health: %s", fmtServer(server), err)
|
a.logger.Warn("Error updating server health", "server", fmtServer(server), "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@@ -3,7 +3,6 @@ package consul
 import (
 "fmt"
 "io"
-"log"
 "os"
 "strconv"
 "sync"

@@ -15,7 +14,9 @@ import (
 "github.com/hashicorp/consul/agent/router"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/logging"
 "github.com/hashicorp/consul/tlsutil"
+"github.com/hashicorp/go-hclog"
 "github.com/hashicorp/serf/serf"
 "golang.org/x/time/rate"
 )

@@ -72,7 +73,7 @@ type Client struct {
 eventCh chan serf.Event

 // Logger uses the provided LogOutput
-logger *log.Logger
+logger hclog.InterceptLogger

 // serf is the Serf cluster maintained inside the DC
 // which contains all the DC nodes

@@ -100,7 +101,7 @@ func NewClient(config *Config) (*Client, error) {
 return NewClientLogger(config, nil, c)
 }

-func NewClientLogger(config *Config, logger *log.Logger, tlsConfigurator *tlsutil.Configurator) (*Client, error) {
+func NewClientLogger(config *Config, logger hclog.InterceptLogger, tlsConfigurator *tlsutil.Configurator) (*Client, error) {
 // Check the protocol version
 if err := config.CheckProtocolVersion(); err != nil {
 return nil, err

@@ -123,7 +124,10 @@ func NewClientLogger(config *Config, logger *log.Logger, tlsConfigurator *tlsuti

 // Create a logger
 if logger == nil {
-logger = log.New(config.LogOutput, "", log.LstdFlags)
+logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{
+Level: hclog.Debug,
+Output: config.LogOutput,
+})
 }

 connPool := &pool.ConnPool{
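This is where the PR's configurable output surfaces: the client's fallback logger is now an hclog InterceptLogger built from LoggerOptions, and the same options struct is what selects unstructured versus JSON records. A minimal sketch of that choice, assuming only go-hclog; the logJSON flag wiring below is illustrative, only hclog's JSONFormat field is real:

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

// newLogger builds a process-wide logger. With JSONFormat set, every record
// is emitted as a JSON object; otherwise hclog writes its human-readable
// "timestamp [LEVEL] name: msg: k=v" lines.
func newLogger(logJSON bool) hclog.InterceptLogger {
	return hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:       "consul",
		Level:      hclog.Debug,
		Output:     os.Stderr,
		JSONFormat: logJSON, // hypothetical wiring of a JSON-logging config flag
	})
}

func main() {
	logger := newLogger(true)
	// Roughly: {"@level":"info","@message":"started","@module":"consul","datacenter":"dc1",...}
	logger.Info("started", "datacenter", "dc1")
}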
@ -140,7 +144,7 @@ func NewClientLogger(config *Config, logger *log.Logger, tlsConfigurator *tlsuti
|
||||||
config: config,
|
config: config,
|
||||||
connPool: connPool,
|
connPool: connPool,
|
||||||
eventCh: make(chan serf.Event, serfEventBacklog),
|
eventCh: make(chan serf.Event, serfEventBacklog),
|
||||||
logger: logger,
|
logger: logger.NamedIntercept(logging.ConsulClient),
|
||||||
shutdownCh: make(chan struct{}),
|
shutdownCh: make(chan struct{}),
|
||||||
tlsConfigurator: tlsConfigurator,
|
tlsConfigurator: tlsConfigurator,
|
||||||
}
|
}
|
||||||
|
@ -156,10 +160,10 @@ func NewClientLogger(config *Config, logger *log.Logger, tlsConfigurator *tlsuti
|
||||||
aclConfig := ACLResolverConfig{
|
aclConfig := ACLResolverConfig{
|
||||||
Config: config,
|
Config: config,
|
||||||
Delegate: c,
|
Delegate: c,
|
||||||
Logger: logger,
|
Logger: c.logger,
|
||||||
AutoDisable: true,
|
AutoDisable: true,
|
||||||
CacheConfig: clientACLCacheConfig,
|
CacheConfig: clientACLCacheConfig,
|
||||||
ACLConfig: newACLConfig(logger),
|
ACLConfig: newACLConfig(c.logger),
|
||||||
}
|
}
|
||||||
var err error
|
var err error
|
||||||
if c.acls, err = NewACLResolver(&aclConfig); err != nil {
|
if c.acls, err = NewACLResolver(&aclConfig); err != nil {
|
||||||
|
@ -197,7 +201,7 @@ func NewClientLogger(config *Config, logger *log.Logger, tlsConfigurator *tlsuti
|
||||||
|
|
||||||
// Shutdown is used to shutdown the client
|
// Shutdown is used to shutdown the client
|
||||||
func (c *Client) Shutdown() error {
|
func (c *Client) Shutdown() error {
|
||||||
c.logger.Printf("[INFO] consul: shutting down client")
|
c.logger.Info("shutting down client")
|
||||||
c.shutdownLock.Lock()
|
c.shutdownLock.Lock()
|
||||||
defer c.shutdownLock.Unlock()
|
defer c.shutdownLock.Unlock()
|
||||||
|
|
||||||
|
@ -222,12 +226,12 @@ func (c *Client) Shutdown() error {
|
||||||
|
|
||||||
// Leave is used to prepare for a graceful shutdown
|
// Leave is used to prepare for a graceful shutdown
|
||||||
func (c *Client) Leave() error {
|
func (c *Client) Leave() error {
|
||||||
c.logger.Printf("[INFO] consul: client starting leave")
|
c.logger.Info("client starting leave")
|
||||||
|
|
||||||
// Leave the LAN pool
|
// Leave the LAN pool
|
||||||
if c.serf != nil {
|
if c.serf != nil {
|
||||||
if err := c.serf.Leave(); err != nil {
|
if err := c.serf.Leave(); err != nil {
|
||||||
c.logger.Printf("[ERR] consul: Failed to leave LAN Serf cluster: %v", err)
|
c.logger.Error("Failed to leave LAN Serf cluster", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -313,7 +317,11 @@ TRY:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move off to another server, and see if we can retry.
|
// Move off to another server, and see if we can retry.
|
||||||
c.logger.Printf("[ERR] consul: %q RPC failed to server %s: %v", method, server.Addr, rpcErr)
|
c.logger.Error("RPC failed to server",
|
||||||
|
"method", method,
|
||||||
|
"server", server.Addr,
|
||||||
|
"error", rpcErr,
|
||||||
|
)
|
||||||
metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}})
|
metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}})
|
||||||
c.routers.NotifyFailedServer(server)
|
c.routers.NotifyFailedServer(server)
|
||||||
if retry := canRetry(args, rpcErr); !retry {
|
if retry := canRetry(args, rpcErr); !retry {
|
||||||
|
@ -357,7 +365,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err := snap.Close(); err != nil {
|
if err := snap.Close(); err != nil {
|
||||||
c.logger.Printf("[WARN] consul: Failed closing snapshot stream: %v", err)
|
c.logger.Error("Failed closing snapshot stream", "error", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|
|
@@ -8,6 +8,8 @@ import (
 "github.com/hashicorp/consul/agent/metadata"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/logging"
+"github.com/hashicorp/go-hclog"
 "github.com/hashicorp/serf/serf"
 )

@@ -32,12 +34,17 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
 conf.Tags["acls"] = string(structs.ACLModeDisabled)
 }

-if c.logger == nil {
-conf.MemberlistConfig.LogOutput = c.config.LogOutput
-conf.LogOutput = c.config.LogOutput
-}
-conf.MemberlistConfig.Logger = c.logger
-conf.Logger = c.logger
+// We use the Intercept variant here to ensure that serf and memberlist logs
+// can be streamed via the monitor endpoint
+serfLogger := c.logger.
+NamedIntercept(logging.Serf).
+StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+memberlistLogger := c.logger.
+NamedIntercept(logging.Memberlist).
+StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+
+conf.MemberlistConfig.Logger = memberlistLogger
+conf.Logger = serfLogger
 conf.EventCh = ch
 conf.ProtocolVersion = protocolVersionMap[c.config.ProtocolVersion]
 conf.RejoinAfterLeave = c.config.RejoinAfterLeave
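serf and memberlist still expect a stdlib *log.Logger, so the hclog logger is bridged back, with InferLevels re-parsing prefixes such as "[WARN]" into real levels. A compact sketch of that bridge using the plain (non-intercept) hclog API, which is the same idea without Consul's monitor-endpoint wiring:

package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
)

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug})

	// Libraries that only accept *log.Logger get a bridge; InferLevels makes
	// hclog read "[DEBUG]", "[WARN]", etc. prefixes and keep the level.
	var serfLogger *log.Logger = root.Named("serf").StandardLogger(
		&hclog.StandardLoggerOptions{InferLevels: true},
	)

	serfLogger.Println("[WARN] serf: this line is re-emitted at warn level")
}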
@ -62,7 +69,10 @@ func (c *Client) lanEventHandler() {
|
||||||
for {
|
for {
|
||||||
numQueuedEvents = len(c.eventCh)
|
numQueuedEvents = len(c.eventCh)
|
||||||
if numQueuedEvents > serfEventBacklogWarning {
|
if numQueuedEvents > serfEventBacklogWarning {
|
||||||
c.logger.Printf("[WARN] consul: number of queued serf events above warning threshold: %d/%d", numQueuedEvents, serfEventBacklogWarning)
|
c.logger.Warn("number of queued serf events above warning threshold",
|
||||||
|
"queued_events", numQueuedEvents,
|
||||||
|
"warning_threshold", serfEventBacklogWarning,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
|
@ -77,7 +87,7 @@ func (c *Client) lanEventHandler() {
|
||||||
case serf.EventMemberUpdate: // Ignore
|
case serf.EventMemberUpdate: // Ignore
|
||||||
case serf.EventQuery: // Ignore
|
case serf.EventQuery: // Ignore
|
||||||
default:
|
default:
|
||||||
c.logger.Printf("[WARN] consul: unhandled LAN Serf Event: %#v", e)
|
c.logger.Warn("unhandled LAN Serf Event", "event", e)
|
||||||
}
|
}
|
||||||
case <-c.shutdownCh:
|
case <-c.shutdownCh:
|
||||||
return
|
return
|
||||||
|
@ -93,11 +103,13 @@ func (c *Client) nodeJoin(me serf.MemberEvent) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if parts.Datacenter != c.config.Datacenter {
|
if parts.Datacenter != c.config.Datacenter {
|
||||||
c.logger.Printf("[WARN] consul: server %s for datacenter %s has joined wrong cluster",
|
c.logger.Warn("server has joined the wrong cluster: wrong datacenter",
|
||||||
m.Name, parts.Datacenter)
|
"server", m.Name,
|
||||||
|
"datacenter", parts.Datacenter,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
c.logger.Printf("[INFO] consul: adding server %s", parts)
|
c.logger.Info("adding server", "server", parts)
|
||||||
c.routers.AddServer(parts)
|
c.routers.AddServer(parts)
|
||||||
|
|
||||||
// Trigger the callback
|
// Trigger the callback
|
||||||
|
@ -114,7 +126,7 @@ func (c *Client) nodeFail(me serf.MemberEvent) {
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
c.logger.Printf("[INFO] consul: removing server %s", parts)
|
c.logger.Info("removing server", "server", parts.String())
|
||||||
c.routers.RemoveServer(parts)
|
c.routers.RemoveServer(parts)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -128,7 +140,7 @@ func (c *Client) localEvent(event serf.UserEvent) {
|
||||||
|
|
||||||
switch name := event.Name; {
|
switch name := event.Name; {
|
||||||
case name == newLeaderEvent:
|
case name == newLeaderEvent:
|
||||||
c.logger.Printf("[INFO] consul: New leader elected: %s", event.Payload)
|
c.logger.Info("New leader elected", "payload", string(event.Payload))
|
||||||
|
|
||||||
// Trigger the callback
|
// Trigger the callback
|
||||||
if c.config.ServerUp != nil {
|
if c.config.ServerUp != nil {
|
||||||
|
@ -136,7 +148,7 @@ func (c *Client) localEvent(event serf.UserEvent) {
|
||||||
}
|
}
|
||||||
case isUserEvent(name):
|
case isUserEvent(name):
|
||||||
event.Name = rawUserEventName(name)
|
event.Name = rawUserEventName(name)
|
||||||
c.logger.Printf("[DEBUG] consul: user event: %s", event.Name)
|
c.logger.Debug("user event", "name", event.Name)
|
||||||
|
|
||||||
// Trigger the callback
|
// Trigger the callback
|
||||||
if c.config.UserEventHandler != nil {
|
if c.config.UserEventHandler != nil {
|
||||||
|
@ -144,7 +156,7 @@ func (c *Client) localEvent(event serf.UserEvent) {
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
if !c.handleEnterpriseUserEvents(event) {
|
if !c.handleEnterpriseUserEvents(event) {
|
||||||
c.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
|
c.logger.Warn("Unhandled local event", "event", event)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,8 @@ import (
|
||||||
|
|
||||||
"github.com/armon/go-metrics"
|
"github.com/armon/go-metrics"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
func cmpConfigLess(first structs.ConfigEntry, second structs.ConfigEntry) bool {
|
func cmpConfigLess(first structs.ConfigEntry, second structs.ConfigEntry) bool {
|
||||||
|
@@ -116,13 +118,15 @@ func (s *Server) fetchConfigEntries(lastRemoteIndex uint64) (*structs.IndexedGen
 return &response, nil
 }

-func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
+func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
+replicationLogger := s.loggers.Named(logging.Replication).Named(logging.ConfigEntry)
+
 remote, err := s.fetchConfigEntries(lastRemoteIndex)
 if err != nil {
 return 0, false, fmt.Errorf("failed to retrieve remote config entries: %v", err)
 }

-s.logger.Printf("[DEBUG] replication: finished fetching config entries: %d", len(remote.Entries))
+replicationLogger.Debug("finished fetching config entries", "amount", len(remote.Entries))

 // Need to check if we should be stopping. This will be common as the fetching process is a blocking
 // RPC which could have been hanging around for a long time and during that time leadership could
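Chained Named calls give the replication routine a hierarchical logger name, so its output is attributable without repeating the subsystem in every message. A tiny sketch of that naming behaviour, assuming only go-hclog (the subsystem strings here are illustrative):

package main

import "github.com/hashicorp/go-hclog"

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "consul"})

	// Each Named call appends a dot-separated segment to the logger's name.
	replLogger := root.Named("replication").Named("config_entry")

	// Emitted under the module name "consul.replication.config_entry".
	replLogger.Info("finished fetching config entries", "amount", 12)
}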
@ -158,19 +162,30 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64) (u
|
||||||
// The lastRemoteIndex is not used when the entry exists either only in the local state or
|
// The lastRemoteIndex is not used when the entry exists either only in the local state or
|
||||||
// only in the remote state. In those situations we need to either delete it or create it.
|
// only in the remote state. In those situations we need to either delete it or create it.
|
||||||
if remote.QueryMeta.Index < lastRemoteIndex {
|
if remote.QueryMeta.Index < lastRemoteIndex {
|
||||||
s.logger.Printf("[WARN] replication: Config Entry replication remote index moved backwards (%d to %d), forcing a full Config Entry sync", lastRemoteIndex, remote.QueryMeta.Index)
|
replicationLogger.Warn("Config Entry replication remote index moved backwards, forcing a full Config Entry sync",
|
||||||
|
"from", lastRemoteIndex,
|
||||||
|
"to", remote.QueryMeta.Index,
|
||||||
|
)
|
||||||
lastRemoteIndex = 0
|
lastRemoteIndex = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - local: %d, remote: %d", len(local), len(remote.Entries))
|
replicationLogger.Debug("Config Entry replication",
|
||||||
|
"local", len(local),
|
||||||
|
"remote", len(remote.Entries),
|
||||||
|
)
|
||||||
// Calculate the changes required to bring the state into sync and then
|
// Calculate the changes required to bring the state into sync and then
|
||||||
// apply them.
|
// apply them.
|
||||||
deletions, updates := diffConfigEntries(local, remote.Entries, lastRemoteIndex)
|
deletions, updates := diffConfigEntries(local, remote.Entries, lastRemoteIndex)
|
||||||
|
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - deletions: %d, updates: %d", len(deletions), len(updates))
|
replicationLogger.Debug("Config Entry replication",
|
||||||
|
"deletions", len(deletions),
|
||||||
|
"updates", len(updates),
|
||||||
|
)
|
||||||
|
|
||||||
if len(deletions) > 0 {
|
if len(deletions) > 0 {
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - performing %d deletions", len(deletions))
|
replicationLogger.Debug("Deleting local config entries",
|
||||||
|
"deletions", len(deletions),
|
||||||
|
)
|
||||||
|
|
||||||
exit, err := s.reconcileLocalConfig(ctx, deletions, structs.ConfigEntryDelete)
|
exit, err := s.reconcileLocalConfig(ctx, deletions, structs.ConfigEntryDelete)
|
||||||
if exit {
|
if exit {
|
||||||
|
@ -179,11 +194,13 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64) (u
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, false, fmt.Errorf("failed to delete local config entries: %v", err)
|
return 0, false, fmt.Errorf("failed to delete local config entries: %v", err)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - finished deletions")
|
replicationLogger.Debug("Config Entry replication - finished deletions")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(updates) > 0 {
|
if len(updates) > 0 {
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - performing %d updates", len(updates))
|
replicationLogger.Debug("Updating local config entries",
|
||||||
|
"updates", len(updates),
|
||||||
|
)
|
||||||
exit, err := s.reconcileLocalConfig(ctx, updates, structs.ConfigEntryUpsert)
|
exit, err := s.reconcileLocalConfig(ctx, updates, structs.ConfigEntryUpsert)
|
||||||
if exit {
|
if exit {
|
||||||
return 0, true, nil
|
return 0, true, nil
|
||||||
|
@ -191,7 +208,7 @@ func (s *Server) replicateConfig(ctx context.Context, lastRemoteIndex uint64) (u
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, false, fmt.Errorf("failed to update local config entries: %v", err)
|
return 0, false, fmt.Errorf("failed to update local config entries: %v", err)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[DEBUG] replication: Config Entry replication - finished updates")
|
replicationLogger.Debug("Config Entry replication - finished updates")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the index we got back from the remote side, since we've synced
|
// Return the index we got back from the remote side, since we've synced
|
||||||
|
|
|
@ -10,6 +10,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/consul/lib/semaphore"
|
"github.com/hashicorp/consul/lib/semaphore"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
|
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
|
|
||||||
|
@ -51,6 +52,8 @@ type ConnectCA struct {
|
||||||
// srv is a pointer back to the server.
|
// srv is a pointer back to the server.
|
||||||
srv *Server
|
srv *Server
|
||||||
|
|
||||||
|
logger hclog.Logger
|
||||||
|
|
||||||
// csrRateLimiter limits the rate of signing new certs if configured. Lazily
|
// csrRateLimiter limits the rate of signing new certs if configured. Lazily
|
||||||
// initialized from current config to support dynamic changes.
|
// initialized from current config to support dynamic changes.
|
||||||
// csrRateLimiterMu must be held while dereferencing the pointer or storing a
|
// csrRateLimiterMu must be held while dereferencing the pointer or storing a
|
||||||
|
@ -251,7 +254,7 @@ func (s *ConnectCA) ConfigurationSet(
|
||||||
// If the config has been committed, update the local provider instance
|
// If the config has been committed, update the local provider instance
|
||||||
s.srv.setCAProvider(newProvider, newActiveRoot)
|
s.srv.setCAProvider(newProvider, newActiveRoot)
|
||||||
|
|
||||||
s.srv.logger.Printf("[INFO] connect: CA provider config updated")
|
s.logger.Info("CA provider config updated")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -276,7 +279,7 @@ func (s *ConnectCA) ConfigurationSet(
|
||||||
"disruption - see documentation for more.")
|
"disruption - see documentation for more.")
|
||||||
}
|
}
|
||||||
if !canXSign && args.Config.ForceWithoutCrossSigning {
|
if !canXSign && args.Config.ForceWithoutCrossSigning {
|
||||||
s.srv.logger.Println("[WARN] current CA doesn't support cross signing but " +
|
s.logger.Warn("current CA doesn't support cross signing but " +
|
||||||
"CA reconfiguration forced anyway with ForceWithoutCrossSigning")
|
"CA reconfiguration forced anyway with ForceWithoutCrossSigning")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -348,10 +351,10 @@ func (s *ConnectCA) ConfigurationSet(
|
||||||
s.srv.setCAProvider(newProvider, newActiveRoot)
|
s.srv.setCAProvider(newProvider, newActiveRoot)
|
||||||
|
|
||||||
if err := oldProvider.Cleanup(); err != nil {
|
if err := oldProvider.Cleanup(); err != nil {
|
||||||
s.srv.logger.Printf("[WARN] connect: failed to clean up old provider %q", config.Provider)
|
s.logger.Warn("failed to clean up old provider", "provider", config.Provider)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.srv.logger.Printf("[INFO] connect: CA rotated to new root under provider %q", args.Config.Provider)
|
s.logger.Info("CA rotated to new root under provider", "provider", args.Config.Provider)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@@ -9,6 +9,8 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/logging"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 )
 
@@ -17,6 +19,8 @@ type Coordinate struct {
 	// srv is a pointer back to the server.
 	srv *Server
 
+	logger hclog.Logger
+
 	// updates holds pending coordinate updates for the given nodes. This is
 	// keyed by node:segment so we can get a coordinate for each segment for
 	// servers, and we only track the latest update per node:segment.
@@ -27,9 +31,10 @@ type Coordinate struct {
 }
 
 // NewCoordinate returns a new Coordinate endpoint.
-func NewCoordinate(srv *Server) *Coordinate {
+func NewCoordinate(srv *Server, logger hclog.Logger) *Coordinate {
 	c := &Coordinate{
 		srv:     srv,
+		logger:  logger.Named(logging.Coordinate),
 		updates: make(map[string]*structs.CoordinateUpdateRequest),
 	}
 
@@ -44,7 +49,7 @@ func (c *Coordinate) batchUpdate() {
 		select {
 		case <-time.After(c.srv.config.CoordinateUpdatePeriod):
 			if err := c.batchApplyUpdates(); err != nil {
-				c.srv.logger.Printf("[WARN] consul.coordinate: Batch update failed: %v", err)
+				c.logger.Warn("Batch update failed", "error", err)
 			}
 		case <-c.srv.shutdownCh:
 			return
@@ -66,7 +71,7 @@ func (c *Coordinate) batchApplyUpdates() error {
 	limit := c.srv.config.CoordinateUpdateBatchSize * c.srv.config.CoordinateUpdateMaxBatches
 	size := len(pending)
 	if size > limit {
-		c.srv.logger.Printf("[WARN] consul.coordinate: Discarded %d coordinate updates", size-limit)
+		c.logger.Warn("Discarded coordinate updates", "number_discarded", size-limit)
 		size = limit
 	}
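NewCoordinate now takes its logger as an explicit dependency and derives a named child from it, so each endpoint's output identifies its subsystem. A rough sketch of that constructor-injection pattern; the types here are illustrative stand-ins, not Consul's real Server or Coordinate types:

    package main

    import "github.com/hashicorp/go-hclog"

    type endpoint struct {
    	logger hclog.Logger
    }

    // newEndpoint mirrors the shape of NewCoordinate above: the caller owns
    // the root logger, the endpoint only holds a named sub-logger.
    func newEndpoint(logger hclog.Logger) *endpoint {
    	return &endpoint{logger: logger.Named("coordinate")}
    }

    func main() {
    	root := hclog.New(&hclog.LoggerOptions{Name: "consul"})
    	e := newEndpoint(root)
    	e.logger.Warn("Batch update failed", "error", "example error")
    }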
@@ -47,7 +47,7 @@ func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
 
 	// Apply all updates in a single transaction
 	if err := c.state.EnsureRegistration(index, &req); err != nil {
-		c.logger.Printf("[WARN] consul.fsm: EnsureRegistration failed: %v", err)
+		c.logger.Warn("EnsureRegistration failed", "error", err)
 		return err
 	}
 	return nil
@@ -65,17 +65,17 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
 	// make changes here, be sure to also adjust the code over there.
 	if req.ServiceID != "" {
 		if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil {
-			c.logger.Printf("[WARN] consul.fsm: DeleteNodeService failed: %v", err)
+			c.logger.Warn("DeleteNodeService failed", "error", err)
 			return err
 		}
 	} else if req.CheckID != "" {
 		if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil {
-			c.logger.Printf("[WARN] consul.fsm: DeleteNodeCheck failed: %v", err)
+			c.logger.Warn("DeleteNodeCheck failed", "error", err)
 			return err
 		}
 	} else {
 		if err := c.state.DeleteNode(index, req.Node); err != nil {
-			c.logger.Printf("[WARN] consul.fsm: DeleteNode failed: %v", err)
+			c.logger.Warn("DeleteNode failed", "error", err)
 			return err
 		}
 	}
@@ -122,7 +122,7 @@ func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} {
 		return act
 	default:
 		err := fmt.Errorf("Invalid KVS operation '%s'", req.Op)
-		c.logger.Printf("[WARN] consul.fsm: %v", err)
+		c.logger.Warn("Invalid KVS operation", "operation", req.Op)
 		return err
 	}
 }
@@ -143,7 +143,7 @@ func (c *FSM) applySessionOperation(buf []byte, index uint64) interface{} {
 	case structs.SessionDestroy:
 		return c.state.SessionDestroy(index, req.Session.ID, &req.Session.EnterpriseMeta)
 	default:
-		c.logger.Printf("[WARN] consul.fsm: Invalid Session operation '%s'", req.Op)
+		c.logger.Warn("Invalid Session operation", "operation", req.Op)
 		return fmt.Errorf("Invalid Session operation '%s'", req.Op)
 	}
 }
@@ -190,7 +190,7 @@ func (c *FSM) applyACLOperation(buf []byte, index uint64) interface{} {
 	case structs.ACLDelete:
 		return c.state.ACLTokenDeleteBySecret(index, req.ACL.ID, nil)
 	default:
-		c.logger.Printf("[WARN] consul.fsm: Invalid ACL operation '%s'", req.Op)
+		c.logger.Warn("Invalid ACL operation", "operation", req.Op)
 		return fmt.Errorf("Invalid ACL operation '%s'", req.Op)
 	}
 }
@@ -206,7 +206,7 @@ func (c *FSM) applyTombstoneOperation(buf []byte, index uint64) interface{} {
 	case structs.TombstoneReap:
 		return c.state.ReapTombstones(req.ReapIndex)
 	default:
-		c.logger.Printf("[WARN] consul.fsm: Invalid Tombstone operation '%s'", req.Op)
+		c.logger.Warn("Invalid Tombstone operation", "operation", req.Op)
 		return fmt.Errorf("Invalid Tombstone operation '%s'", req.Op)
 	}
 }
@@ -243,7 +243,7 @@ func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{}
 	case structs.PreparedQueryDelete:
 		return c.state.PreparedQueryDelete(index, req.Query.ID)
 	default:
-		c.logger.Printf("[WARN] consul.fsm: Invalid PreparedQuery operation '%s'", req.Op)
+		c.logger.Warn("Invalid PreparedQuery operation", "operation", req.Op)
 		return fmt.Errorf("Invalid PreparedQuery operation '%s'", req.Op)
 	}
 }
@@ -295,7 +295,7 @@ func (c *FSM) applyIntentionOperation(buf []byte, index uint64) interface{} {
 	case structs.IntentionOpDelete:
 		return c.state.IntentionDelete(index, req.Intention.ID)
 	default:
-		c.logger.Printf("[WARN] consul.fsm: Invalid Intention operation '%s'", req.Op)
+		c.logger.Warn("Invalid Intention operation", "operation", req.Op)
 		return fmt.Errorf("Invalid Intention operation '%s'", req.Op)
 	}
 }
@@ -365,7 +365,7 @@ func (c *FSM) applyConnectCAOperation(buf []byte, index uint64) interface{} {
 
 		return sn
 	default:
-		c.logger.Printf("[WARN consul.fsm: Invalid CA operation '%s'", req.Op)
+		c.logger.Warn("Invalid CA operation", "operation", req.Op)
 		return fmt.Errorf("Invalid CA operation '%s'", req.Op)
 	}
 }
@@ -385,7 +385,7 @@ func (c *FSM) applyConnectCALeafOperation(buf []byte, index uint64) interface{}
 		}
 		return index
 	default:
-		c.logger.Printf("[WARN consul.fsm: Invalid CA Leaf operation '%s'", req.Op)
+		c.logger.Warn("Invalid CA Leaf operation", "operation", req.Op)
 		return fmt.Errorf("Invalid CA operation '%s'", req.Op)
 	}
 }
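The FSM rewrites above all use the same key/value convention: errors go under an "error" key and enum-like values under a descriptive key such as "operation". hclog renders those pairs either as key=value text or as JSON fields, which is what makes the unstructured/JSON toggle in this change possible. A minimal illustration; the printed shapes are approximate, not verbatim hclog output:

    package main

    import "github.com/hashicorp/go-hclog"

    func main() {
    	logger := hclog.New(&hclog.LoggerOptions{Name: "consul.fsm", JSONFormat: true})
    	logger.Warn("Invalid KVS operation", "operation", "bogus-op")
    	// Text mode prints roughly:  [WARN]  consul.fsm: Invalid KVS operation: operation=bogus-op
    	// JSON mode prints roughly:  {"@level":"warn","@message":"Invalid KVS operation","operation":"bogus-op"}
    }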
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"math/rand"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -14,6 +13,7 @@ import (
 	"github.com/hashicorp/consul/agent/consul/autopilot"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/go-raftchunking"
 	raftchunkingtypes "github.com/hashicorp/go-raftchunking/types"
@@ -47,7 +47,8 @@ func generateRandomCoordinate() *coordinate.Coordinate {
 
 func TestFSM_RegisterNode(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -91,7 +92,8 @@ func TestFSM_RegisterNode(t *testing.T) {
 
 func TestFSM_RegisterNode_Service(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -154,7 +156,8 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
 
 func TestFSM_DeregisterService(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -216,7 +219,8 @@ func TestFSM_DeregisterService(t *testing.T) {
 
 func TestFSM_DeregisterCheck(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -278,7 +282,8 @@ func TestFSM_DeregisterCheck(t *testing.T) {
 
 func TestFSM_DeregisterNode(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -355,7 +360,8 @@ func TestFSM_DeregisterNode(t *testing.T) {
 
 func TestFSM_KVSDelete(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -401,7 +407,8 @@ func TestFSM_KVSDelete(t *testing.T) {
 
 func TestFSM_KVSDeleteTree(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -448,7 +455,8 @@ func TestFSM_KVSDeleteTree(t *testing.T) {
 
 func TestFSM_KVSDeleteCheckAndSet(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -504,7 +512,8 @@ func TestFSM_KVSDeleteCheckAndSet(t *testing.T) {
 
 func TestFSM_KVSCheckAndSet(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -561,7 +570,8 @@ func TestFSM_KVSCheckAndSet(t *testing.T) {
 
 func TestFSM_KVSLock(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -613,7 +623,8 @@ func TestFSM_KVSLock(t *testing.T) {
 
 func TestFSM_KVSUnlock(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -683,7 +694,8 @@ func TestFSM_KVSUnlock(t *testing.T) {
 
 func TestFSM_CoordinateUpdate(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -730,7 +742,8 @@ func TestFSM_CoordinateUpdate(t *testing.T) {
 
 func TestFSM_SessionCreate_Destroy(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -817,7 +830,8 @@ func TestFSM_SessionCreate_Destroy(t *testing.T) {
 
 func TestFSM_ACL_CRUD(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -935,7 +949,8 func TestFSM_ACL_CRUD(t *testing.T) {
 
 func TestFSM_PreparedQuery_CRUD(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -1040,7 +1055,8 @@ func TestFSM_PreparedQuery_CRUD(t *testing.T) {
 
 func TestFSM_TombstoneReap(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -1096,7 +1112,8 @@ func TestFSM_TombstoneReap(t *testing.T) {
 
 func TestFSM_Txn(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -1138,7 +1155,8 @@ func TestFSM_Txn(t *testing.T) {
 
 func TestFSM_Autopilot(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -1202,7 +1220,8 @@ func TestFSM_Intention_CRUD(t *testing.T) {
 	t.Parallel()
 
 	assert := assert.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	assert.Nil(err)
 
 	// Create a new intention.
@@ -1271,7 +1290,8 @@ func TestFSM_CAConfig(t *testing.T) {
 	t.Parallel()
 
 	assert := assert.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	assert.Nil(err)
 
 	// Set the autopilot config using a request.
@@ -1342,7 +1362,8 @@ func TestFSM_CARoots(t *testing.T) {
 	t.Parallel()
 
 	assert := assert.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	assert.Nil(err)
 
 	// Roots
@@ -1374,7 +1395,8 @@ func TestFSM_CABuiltinProvider(t *testing.T) {
 	t.Parallel()
 
 	assert := assert.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	assert.Nil(err)
 
 	// Provider state.
@@ -1412,7 +1434,8 @@ func TestFSM_ConfigEntry(t *testing.T) {
 	t.Parallel()
 
 	require := require.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	require.NoError(err)
 
 	// Create a simple config entry
@@ -1459,7 +1482,8 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
 	require := require.New(t)
 	assert := assert.New(t)
 
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	require.NoError(err)
 
 	var logOfLogs [][]*raft.Log
@@ -1537,7 +1561,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
 	err = snap.Persist(sink)
 	require.NoError(err)
 
-	fsm2, err := New(nil, os.Stderr)
+	fsm2, err := New(nil, logger)
 	require.NoError(err)
 	err = fsm2.Restore(sink)
 	require.NoError(err)
@@ -1592,7 +1616,8 @@ func TestFSM_Chunking_TermChange(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	require.NoError(err)
 
 	req := structs.RegisterRequest{
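All of the test changes above swap os.Stderr for a logger from the SDK's testutil.Logger(t) helper. As a rough stand-in for that helper (an assumption about its behavior, not its actual implementation), a test-scoped hclog logger can be built like this:

    package fsm

    import (
    	"testing"

    	"github.com/hashicorp/go-hclog"
    )

    // testLogger approximates sdk/testutil.Logger: a verbose hclog logger
    // named after the running test. The real helper may also route output
    // through the testing framework.
    func testLogger(t *testing.T) hclog.Logger {
    	return hclog.New(&hclog.LoggerOptions{
    		Name:  t.Name(),
    		Level: hclog.Trace,
    	})
    }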
@@ -3,12 +3,13 @@ package fsm
 import (
 	"fmt"
 	"io"
-	"log"
 	"sync"
 	"time"
 
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/logging"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/go-raftchunking"
 	"github.com/hashicorp/raft"
@@ -45,9 +46,8 @@ func registerCommand(msg structs.MessageType, fn unboundCommand) {
 // along with Raft to provide strong consistency. We implement
 // this outside the Server to avoid exposing this outside the package.
 type FSM struct {
-	logOutput io.Writer
-	logger    *log.Logger
-	path      string
+	logger hclog.Logger
+	path   string
 
 	// apply is built off the commands global and is used to route apply
 	// operations to their appropriate handlers.
@@ -66,18 +66,21 @@ type FSM struct {
 }
 
 // New is used to construct a new FSM with a blank state.
-func New(gc *state.TombstoneGC, logOutput io.Writer) (*FSM, error) {
+func New(gc *state.TombstoneGC, logger hclog.Logger) (*FSM, error) {
+	if logger == nil {
+		logger = hclog.New(&hclog.LoggerOptions{})
+	}
+
 	stateNew, err := state.NewStateStore(gc)
 	if err != nil {
 		return nil, err
 	}
 
 	fsm := &FSM{
-		logOutput: logOutput,
-		logger:    log.New(logOutput, "", log.LstdFlags),
-		apply:     make(map[structs.MessageType]command),
-		state:     stateNew,
-		gc:        gc,
+		logger: logger.Named(logging.FSM),
+		apply:  make(map[structs.MessageType]command),
+		state:  stateNew,
+		gc:     gc,
 	}
 
 	// Build out the apply dispatch table based on the registered commands.
@@ -125,7 +128,7 @@ func (c *FSM) Apply(log *raft.Log) interface{} {
 	// Otherwise, see if it's safe to ignore. If not, we have to panic so
 	// that we crash and our state doesn't diverge.
 	if ignoreUnknown {
-		c.logger.Printf("[WARN] consul.fsm: ignoring unknown message type (%d), upgrade to newer version", msgType)
+		c.logger.Warn("ignoring unknown message type, upgrade to newer version", "type", msgType)
 		return nil
 	}
 	panic(fmt.Errorf("failed to apply request: %#v", buf))
@@ -133,7 +136,7 @@ func (c *FSM) Apply(log *raft.Log) interface{} {
 
 func (c *FSM) Snapshot() (raft.FSMSnapshot, error) {
 	defer func(start time.Time) {
-		c.logger.Printf("[INFO] consul.fsm: snapshot created in %v", time.Since(start))
+		c.logger.Info("snapshot created", "duration", time.Since(start).String())
 	}(time.Now())
 
 	chunkState, err := c.chunker.CurrentState()
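The nil check in New means callers that do not care about logging can pass nil and still get a usable default logger instead of a nil-pointer panic at the first log call; the new TestFSM_NilLogger test below exercises exactly that. The same defensive pattern in isolation, with an illustrative type rather than the real FSM:

    package main

    import "github.com/hashicorp/go-hclog"

    type thing struct {
    	logger hclog.Logger
    }

    func newThing(logger hclog.Logger) *thing {
    	// Defensive default: nil becomes a plain hclog logger.
    	if logger == nil {
    		logger = hclog.New(&hclog.LoggerOptions{})
    	}
    	return &thing{logger: logger}
    }

    func main() {
    	_ = newThing(nil) // safe: logging calls will not panic
    }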
@@ -2,10 +2,10 @@ package fsm
 
 import (
 	"bytes"
-	"os"
 	"testing"
 
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/raft"
 	"github.com/stretchr/testify/assert"
 )
@@ -39,7 +39,8 @@ func makeLog(buf []byte) *raft.Log {
 
 func TestFSM_IgnoreUnknown(t *testing.T) {
 	t.Parallel()
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	assert.Nil(t, err)
 
 	// Create a new reap request
@@ -56,3 +57,9 @@ func TestFSM_IgnoreUnknown(t *testing.T) {
 	err, ok := resp.(error)
 	assert.False(t, ok, "response: %s", err)
 }
+
+func TestFSM_NilLogger(t *testing.T) {
+	fsm, err := New(nil, nil)
+	assert.Nil(t, err)
+	assert.NotNil(t, fsm)
+}
@@ -2,7 +2,6 @@ package fsm
 
 import (
 	"bytes"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -14,6 +13,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/go-raftchunking"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -24,7 +24,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 
 	assert := assert.New(t)
 	require := require.New(t)
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -288,7 +289,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 	}
 
 	// Try to restore on a new FSM
-	fsm2, err := New(nil, os.Stderr)
+	fsm2, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -521,7 +522,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 func TestFSM_BadRestore_OSS(t *testing.T) {
 	t.Parallel()
 	// Create an FSM with some state.
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -563,7 +565,8 @@ func TestFSM_BadSnapshot_NilCAConfig(t *testing.T) {
 	require := require.New(t)
 
 	// Create an FSM with no config entry.
-	fsm, err := New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -583,7 +586,7 @@ func TestFSM_BadSnapshot_NilCAConfig(t *testing.T) {
 	}
 
 	// Try to restore on a new FSM
-	fsm2, err := New(nil, os.Stderr)
+	fsm2, err := New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 )
@@ -22,7 +23,8 @@ var (
 // Intention manages the Connect intentions.
 type Intention struct {
 	// srv is a pointer back to the server.
 	srv *Server
+	logger hclog.Logger
 }
 
 func (s *Intention) checkIntentionID(id string) (bool, error) {
@@ -45,7 +47,7 @@ func (s *Intention) prepareApplyCreate(ident structs.ACLIdentity, authz acl.Auth
 		accessorID = ident.ID()
 	}
 	// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-	s.srv.logger.Printf("[WARN] consul.intention: Intention creation denied due to ACLs, accessorID=%q", accessorID)
+	s.logger.Warn("Intention creation denied due to ACLs", "intention", args.Intention.ID, "accessorID", accessorID)
 	return acl.ErrPermissionDenied
 }
@@ -97,7 +99,7 @@ func (s *Intention) prepareApplyUpdate(ident structs.ACLIdentity, authz acl.Auth
 		accessorID = ident.ID()
 	}
 	// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-	s.srv.logger.Printf("[WARN] consul.intention: Update operation on intention denied due to ACLs, intention=%q accessorID=%q", args.Intention.ID, accessorID)
+	s.logger.Warn("Update operation on intention denied due to ACLs", "intention", args.Intention.ID, "accessorID", accessorID)
 	return acl.ErrPermissionDenied
 }
@@ -118,7 +120,7 @@ func (s *Intention) prepareApplyUpdate(ident structs.ACLIdentity, authz acl.Auth
 		accessorID = ident.ID()
 	}
 	// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-	s.srv.logger.Printf("[WARN] consul.intention: Update operation on intention denied due to ACLs, intention=%q accessorID=%q", args.Intention.ID, accessorID)
+	s.logger.Warn("Update operation on intention denied due to ACLs", "intention", args.Intention.ID, "accessorID", accessorID)
 	return acl.ErrPermissionDenied
 }
@@ -169,7 +171,7 @@ func (s *Intention) prepareApplyDelete(ident structs.ACLIdentity, authz acl.Auth
 		accessorID = ident.ID()
 	}
 	// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-	s.srv.logger.Printf("[WARN] consul.intention: Deletion operation on intention denied due to ACLs, intention=%q accessorID=%q", args.Intention.ID, accessorID)
+	s.logger.Warn("Deletion operation on intention denied due to ACLs", "intention", args.Intention.ID, "accessorID", accessorID)
 	return acl.ErrPermissionDenied
 }
@@ -227,7 +229,7 @@ func (s *Intention) Apply(
 	// Commit
 	resp, err := s.srv.raftApply(structs.IntentionRequestType, args)
 	if err != nil {
-		s.srv.logger.Printf("[ERR] consul.intention: Apply failed %v", err)
+		s.logger.Error("Raft apply failed", "error", err)
 		return err
 	}
 	if respErr, ok := resp.(error); ok {
@@ -270,7 +272,7 @@ func (s *Intention) Get(
 	if len(reply.Intentions) == 0 {
 		accessorID := s.aclAccessorID(args.Token)
 		// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-		s.srv.logger.Printf("[WARN] consul.intention: Request to get intention denied due to ACLs, intention=%s accessorID=%q", args.IntentionID, accessorID)
+		s.logger.Warn("Request to get intention denied due to ACLs", "intention", args.IntentionID, "accessorID", accessorID)
 		return acl.ErrPermissionDenied
 	}
@@ -336,7 +338,7 @@ func (s *Intention) Match(
 		if prefix := entry.Name; prefix != "" && rule.IntentionRead(prefix, &authzContext) != acl.Allow {
 			accessorID := s.aclAccessorID(args.Token)
 			// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-			s.srv.logger.Printf("[WARN] consul.intention: Operation on intention prefix denied due to ACLs, prefix=%s accessorID=%q", prefix, accessorID)
+			s.logger.Warn("Operation on intention prefix denied due to ACLs", "prefix", prefix, "accessorID", accessorID)
 			return acl.ErrPermissionDenied
 		}
 	}
@@ -409,7 +411,7 @@ func (s *Intention) Check(
 		if rule != nil && rule.ServiceRead(prefix, &authzContext) != acl.Allow {
 			accessorID := s.aclAccessorID(args.Token)
 			// todo(kit) Migrate intention access denial logging over to audit logging when we implement it
-			s.srv.logger.Printf("[WARN] consul.intention: test on intention denied due to ACLs, intention=%s accessorID=%q", prefix, accessorID)
+			s.logger.Warn("test on intention denied due to ACLs", "prefix", prefix, "accessorID", accessorID)
 			return acl.ErrPermissionDenied
 		}
 	}
@@ -471,7 +473,7 @@ func (s *Intention) Check(
 func (s *Intention) aclAccessorID(secretID string) string {
 	_, ident, err := s.srv.ResolveIdentityFromToken(secretID)
 	if err != nil {
-		s.srv.logger.Printf("[DEBUG] consul.intention: %v", err)
+		s.srv.logger.Debug("error", err)
 		return ""
 	}
 	if ident == nil {
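Every ACL-denial path above logs the same pair of fields, the intention (or prefix) and the requester's accessorID, before returning acl.ErrPermissionDenied; keeping the key names identical across call sites makes denials easy to filter once the output is structured. hclog also supports binding such fields once with With(), which is not what this diff does but shows the same idea in miniature; the values here are illustrative:

    package main

    import "github.com/hashicorp/go-hclog"

    func main() {
    	base := hclog.New(&hclog.LoggerOptions{Name: "consul.intention"})
    	// With() returns a sub-logger that carries the key/value pairs on
    	// every subsequent call.
    	reqLogger := base.With("accessorID", "example-accessor")
    	reqLogger.Warn("Intention creation denied due to ACLs", "intention", "example-intention")
    }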
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	bexpr "github.com/hashicorp/go-bexpr"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/serf/serf"
@@ -16,7 +17,8 @@ import (
 // does not necessarily fit into the other systems. It is also
 // used to hold undocumented APIs that users should not rely on.
 type Internal struct {
 	srv *Server
+	logger hclog.Logger
 }
 
 // NodeInfo is used to retrieve information about a specific node.
@@ -127,7 +129,7 @@ func (m *Internal) EventFire(args *structs.EventFireRequest,
 
 	if rule != nil && rule.EventWrite(args.Name, nil) != acl.Allow {
 		accessorID := m.aclAccessorID(args.Token)
-		m.srv.logger.Printf("[DEBUG] consul: user event blocked by ACLs, event=%q accessorID=%q", args.Name, accessorID)
+		m.logger.Warn("user event blocked by ACLs", "event", args.Name, "accessorID", accessorID)
 		return acl.ErrPermissionDenied
 	}
@@ -265,7 +267,7 @@ func (m *Internal) executeKeyringOpMgr(
 func (m *Internal) aclAccessorID(secretID string) string {
 	_, ident, err := m.srv.ResolveIdentityFromToken(secretID)
 	if err != nil {
-		m.srv.logger.Printf("[DEBUG] consul.internal: %v", err)
+		m.srv.logger.Debug("error", err)
 		return ""
 	}
 	if ident == nil {
@@ -1,13 +1,13 @@
 package consul
 
 import (
-	"os"
 	"reflect"
 	"testing"
 
 	consulfsm "github.com/hashicorp/consul/agent/consul/fsm"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/raft"
 )
@@ -23,7 +23,8 @@ func makeLog(buf []byte) *raft.Log {
 // Testing for GH-300 and GH-279
 func TestHealthCheckRace(t *testing.T) {
 	t.Parallel()
-	fsm, err := consulfsm.New(nil, os.Stderr)
+	logger := testutil.Logger(t)
+	fsm, err := consulfsm.New(nil, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
@@ -10,18 +10,20 @@ import (
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 )
 
 // KVS endpoint is used to manipulate the Key-Value store
 type KVS struct {
 	srv *Server
+	logger hclog.Logger
 }
 
 // preApply does all the verification of a KVS update that is performed BEFORE
 // we submit as a Raft log entry. This includes enforcing the lock delay which
 // must only be done on the leader.
-func kvsPreApply(srv *Server, authz acl.Authorizer, op api.KVOp, dirEnt *structs.DirEntry) (bool, error) {
+func kvsPreApply(logger hclog.Logger, srv *Server, authz acl.Authorizer, op api.KVOp, dirEnt *structs.DirEntry) (bool, error) {
 	// Verify the entry.
 	if dirEnt.Key == "" && op != api.KVDeleteTree {
 		return false, fmt.Errorf("Must provide key")
@@ -72,8 +74,10 @@ func kvsPreApply(srv *Server, authz acl.Authorizer, op api.KVOp, dirEnt *structs
 		state := srv.fsm.State()
 		expires := state.KVSLockDelay(dirEnt.Key, &dirEnt.EnterpriseMeta)
 		if expires.After(time.Now()) {
-			srv.logger.Printf("[WARN] consul.kvs: Rejecting lock of %s due to lock-delay until %v",
-				dirEnt.Key, expires)
+			logger.Warn("Rejecting lock of key due to lock-delay",
+				"key", dirEnt.Key,
+				"expire_time", expires.String(),
+			)
 			return false, nil
 		}
 	}
@@ -98,7 +102,7 @@ func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error {
 		return err
 	}
 
-	ok, err := kvsPreApply(k.srv, authz, args.Op, &args.DirEnt)
+	ok, err := kvsPreApply(k.logger, k.srv, authz, args.Op, &args.DirEnt)
 	if err != nil {
 		return err
 	}
@@ -110,7 +114,7 @@ func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error {
 	// Apply the update.
 	resp, err := k.srv.raftApply(structs.KVSRequestType, args)
 	if err != nil {
-		k.srv.logger.Printf("[ERR] consul.kvs: Apply failed: %v", err)
+		k.logger.Error("Raft apply failed", "error", err)
 		return err
 	}
 	if respErr, ok := resp.(error); ok {
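kvsPreApply is a package-level helper rather than a method, so its logger has to arrive as an argument; the call site in Apply passes the endpoint's own logger. The general shape of threading a logger through a free function, as a small sketch with an invented helper:

    package main

    import "github.com/hashicorp/go-hclog"

    // preApply is an illustrative stand-in for a helper with no receiver:
    // the caller supplies whatever named logger it already holds.
    func preApply(logger hclog.Logger, key string) bool {
    	if key == "" {
    		logger.Warn("rejecting request", "reason", "missing key")
    		return false
    	}
    	return true
    }

    func main() {
    	logger := hclog.New(&hclog.LoggerOptions{Name: "consul.kvs"})
    	_ = preApply(logger, "")
    }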
@@ -16,7 +16,9 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
+	"github.com/hashicorp/consul/logging"
 	"github.com/hashicorp/consul/types"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/go-version"
@@ -66,7 +68,7 @@ func (s *Server) monitorLeadership() {
 		switch {
 		case isLeader:
 			if weAreLeaderCh != nil {
-				s.logger.Printf("[ERR] consul: attempted to start the leader loop while running")
+				s.logger.Error("attempted to start the leader loop while running")
 				continue
 			}
@@ -76,19 +78,19 @@ func (s *Server) monitorLeadership() {
 				defer leaderLoop.Done()
 				s.leaderLoop(ch)
 			}(weAreLeaderCh)
-			s.logger.Printf("[INFO] consul: cluster leadership acquired")
+			s.logger.Info("cluster leadership acquired")
 
 		default:
 			if weAreLeaderCh == nil {
-				s.logger.Printf("[ERR] consul: attempted to stop the leader loop while not running")
+				s.logger.Error("attempted to stop the leader loop while not running")
 				continue
 			}
 
-			s.logger.Printf("[DEBUG] consul: shutting down leader loop")
+			s.logger.Debug("shutting down leader loop")
 			close(weAreLeaderCh)
 			leaderLoop.Wait()
 			weAreLeaderCh = nil
-			s.logger.Printf("[INFO] consul: cluster leadership lost")
+			s.logger.Info("cluster leadership lost")
 		}
 	case <-aclUpgradeCh:
 		if atomic.LoadInt32(&s.useNewACLs) == 0 {
@@ -101,12 +103,12 @@ func (s *Server) monitorLeadership() {
 		if canUpgrade := s.canUpgradeToNewACLs(weAreLeaderCh != nil); canUpgrade {
 			if weAreLeaderCh != nil {
 				if err := s.initializeACLs(true); err != nil {
-					s.logger.Printf("[ERR] consul: error transitioning to using new ACLs: %v", err)
+					s.logger.Error("error transitioning to using new ACLs", "error", err)
 					continue
 				}
 			}
 
-			s.logger.Printf("[DEBUG] acl: transitioning out of legacy ACL mode")
+			s.logger.Debug("transitioning out of legacy ACL mode")
 			atomic.StoreInt32(&s.useNewACLs, 1)
 			s.updateACLAdvertisement()
@@ -128,9 +130,16 @@ func (s *Server) leadershipTransfer() error {
 	for i := 0; i < retryCount; i++ {
 		future := s.raft.LeadershipTransfer()
 		if err := future.Error(); err != nil {
-			s.logger.Printf("[ERR] consul: failed to transfer leadership attempt %d/%d: %v", i, retryCount, err)
+			s.logger.Error("failed to transfer leadership attempt, will retry",
+				"attempt", i,
+				"retry_limit", retryCount,
+				"error", err,
+			)
 		} else {
-			s.logger.Printf("[ERR] consul: successfully transferred leadership attempt %d/%d", i, retryCount)
+			s.logger.Info("successfully transferred leadership",
+				"attempt", i,
+				"retry_limit", retryCount,
+			)
 			return nil
 		}
@@ -145,7 +154,10 @@ func (s *Server) leaderLoop(stopCh chan struct{}) {
 	payload := []byte(s.config.NodeName)
 	for name, segment := range s.LANSegments() {
 		if err := segment.UserEvent(newLeaderEvent, payload, false); err != nil {
-			s.logger.Printf("[WARN] consul: failed to broadcast new leader event on segment %q: %v", name, err)
+			s.logger.Warn("failed to broadcast new leader event on segment",
+				"segment", name,
+				"error", err,
+			)
 		}
 	}
@@ -163,7 +175,7 @@ RECONCILE:
 	start := time.Now()
 	barrier := s.raft.Barrier(barrierWriteTimeout)
 	if err := barrier.Error(); err != nil {
-		s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
+		s.logger.Error("failed to wait for barrier", "error", err)
 		goto WAIT
 	}
 	metrics.MeasureSince([]string{"leader", "barrier"}, start)
@@ -171,7 +183,7 @@ RECONCILE:
 	// Check if we need to handle initial leadership actions
 	if !establishedLeader {
 		if err := s.establishLeadership(); err != nil {
-			s.logger.Printf("[ERR] consul: failed to establish leadership: %v", err)
+			s.logger.Error("failed to establish leadership", "error", err)
 			// Immediately revoke leadership since we didn't successfully
 			// establish leadership.
 			s.revokeLeadership()
@@ -182,7 +194,7 @@ RECONCILE:
 			// will try to acquire it again after
 			// 5 seconds.
 			if err := s.leadershipTransfer(); err != nil {
-				s.logger.Printf("[ERR] consul: %v", err)
+				s.logger.Error("failed to transfer leadership", "error", err)
 				interval = time.After(5 * time.Second)
 				goto WAIT
 			}
@@ -194,7 +206,7 @@ RECONCILE:
 
 	// Reconcile any missing data
 	if err := s.reconcile(); err != nil {
-		s.logger.Printf("[ERR] consul: failed to reconcile: %v", err)
+		s.logger.Error("failed to reconcile", "error", err)
 		goto WAIT
 	}
@@ -390,7 +402,7 @@ func (s *Server) initializeLegacyACL() error {
 		if err != nil {
 			return fmt.Errorf("failed to create anonymous token: %v", err)
 		}
-		s.logger.Printf("[INFO] acl: Created the anonymous token")
+		s.logger.Info("Created the anonymous token")
 	}
 
 	// Check for configured master token.
@@ -414,7 +426,7 @@ func (s *Server) initializeLegacyACL() error {
 		if err != nil {
 			return fmt.Errorf("failed to create master token: %v", err)
 		}
-		s.logger.Printf("[INFO] consul: Created ACL master token from configuration")
+		s.logger.Info("Created ACL master token from configuration")
 	}
 }
@@ -444,9 +456,9 @@ func (s *Server) initializeLegacyACL() error {
 
 		case bool:
 			if v {
-				s.logger.Printf("[INFO] consul: ACL bootstrap enabled")
+				s.logger.Info("ACL bootstrap enabled")
 			} else {
-				s.logger.Printf("[INFO] consul: ACL bootstrap disabled, existing management tokens found")
+				s.logger.Info("ACL bootstrap disabled, existing management tokens found")
 			}
 
 		default:
@@ -454,7 +466,7 @@ func (s *Server) initializeLegacyACL() error {
 			}
 		}
 	} else {
-		s.logger.Printf("[WARN] consul: Can't initialize ACL bootstrap until all servers are >= %s", minVersion.String())
+		s.logger.Warn("Can't initialize ACL bootstrap until all servers are >= " + minVersion.String())
 	}
 
 	return nil
@@ -492,11 +504,11 @@ func (s *Server) initializeACLs(upgrade bool) error {
 
 	if s.InACLDatacenter() {
 		if s.UseLegacyACLs() && !upgrade {
-			s.logger.Printf("[INFO] acl: initializing legacy acls")
+			s.logger.Info("initializing legacy acls")
 			return s.initializeLegacyACL()
 		}
 
-		s.logger.Printf("[INFO] acl: initializing acls")
+		s.logger.Info("initializing acls")
 
 		// Create/Upgrade the builtin global-management policy
 		_, policy, err := s.fsm.State().ACLPolicyGetByID(nil, structs.ACLPolicyGlobalManagementID, structs.DefaultEnterpriseMeta())
@@ -526,14 +538,14 @@ func (s *Server) initializeACLs(upgrade bool) error {
 			if err != nil {
 				return fmt.Errorf("failed to create global-management policy: %v", err)
 			}
-			s.logger.Printf("[INFO] consul: Created ACL 'global-management' policy")
+			s.logger.Info("Created ACL 'global-management' policy")
 		}
 
 		// Check for configured master token.
 		if master := s.config.ACLMasterToken; len(master) > 0 {
 			state := s.fsm.State()
 			if _, err := uuid.ParseUUID(master); err != nil {
-				s.logger.Printf("[WARN] consul: Configuring a non-UUID master token is deprecated")
+				s.logger.Warn("Configuring a non-UUID master token is deprecated")
 			}
 
 			_, token, err := state.ACLTokenGetBySecret(nil, master, nil)
@@ -573,7 +585,7 @@ func (s *Server) initializeACLs(upgrade bool) error {
 					ResetIndex: 0,
 				}
 				if _, err := s.raftApply(structs.ACLBootstrapRequestType, &req); err == nil {
-					s.logger.Printf("[INFO] consul: Bootstrapped ACL master token from configuration")
+					s.logger.Info("Bootstrapped ACL master token from configuration")
 					done = true
 				} else {
 					if err.Error() != structs.ACLBootstrapNotAllowedErr.Error() &&
@@ -593,7 +605,7 @@ func (s *Server) initializeACLs(upgrade bool) error {
 					return fmt.Errorf("failed to create master token: %v", err)
 				}
 
-				s.logger.Printf("[INFO] consul: Created ACL master token from configuration")
+				s.logger.Info("Created ACL master token from configuration")
 			}
 		}
 	}
@@ -632,7 +644,7 @@ func (s *Server) initializeACLs(upgrade bool) error {
 			if err != nil {
 				return fmt.Errorf("failed to create anonymous token: %v", err)
 			}
-			s.logger.Printf("[INFO] consul: Created ACL anonymous token from configuration")
+			s.logger.Info("Created ACL anonymous token from configuration")
 		}
 	}
 	// launch the upgrade go routine to generate accessors for everything
@@ -670,7 +682,7 @@ func (s *Server) legacyACLTokenUpgrade(ctx context.Context) error {
 		state := s.fsm.State()
 		tokens, waitCh, err := state.ACLTokenListUpgradeable(aclUpgradeBatchSize)
 		if err != nil {
-			s.logger.Printf("[WARN] acl: encountered an error while searching for tokens without accessor ids: %v", err)
+			s.logger.Warn("encountered an error while searching for tokens without accessor ids", "error", err)
 		}
 		// No need to check expiration time here, as that only exists for v2 tokens.
@@ -698,7 +710,7 @@ func (s *Server) legacyACLTokenUpgrade(ctx context.Context) error {
 		} else {
 			accessor, err := lib.GenerateUUID(s.checkTokenUUID)
 			if err != nil {
-				s.logger.Printf("[WARN] acl: failed to generate accessor during token auto-upgrade: %v", err)
+				s.logger.Warn("failed to generate accessor during token auto-upgrade", "error", err)
 				continue
 			}
 			newToken.AccessorID = accessor
@@ -725,11 +737,11 @@ func (s *Server) legacyACLTokenUpgrade(ctx context.Context) error {
 
 		resp, err := s.raftApply(structs.ACLTokenSetRequestType, req)
 		if err != nil {
-			s.logger.Printf("[ERR] acl: failed to apply acl token upgrade batch: %v", err)
+			s.logger.Error("failed to apply acl token upgrade batch", "error", err)
 		}
 
 		if err, ok := resp.(error); ok {
-			s.logger.Printf("[ERR] acl: failed to apply acl token upgrade batch: %v", err)
+			s.logger.Error("failed to apply acl token upgrade batch", "error", err)
 		}
 	}
 }
@@ -751,6 +763,7 @@ func (s *Server) stopACLUpgrade() {
 // the context passed in indicates that it should exit.
 func (s *Server) runLegacyACLReplication(ctx context.Context) error {
 	var lastRemoteIndex uint64
+	legacyACLLogger := s.aclReplicationLogger(logging.Legacy)
 	limiter := rate.NewLimiter(rate.Limit(s.config.ACLReplicationRate), s.config.ACLReplicationBurst)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
@ -762,7 +775,7 @@ func (s *Server) runLegacyACLReplication(ctx context.Context) error {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
index, exit, err := s.replicateLegacyACLs(lastRemoteIndex, ctx)
|
index, exit, err := s.replicateLegacyACLs(ctx, legacyACLLogger, lastRemoteIndex)
|
||||||
if exit {
|
if exit {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -770,11 +783,11 @@ func (s *Server) runLegacyACLReplication(ctx context.Context) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lastRemoteIndex = 0
|
lastRemoteIndex = 0
|
||||||
s.updateACLReplicationStatusError()
|
s.updateACLReplicationStatusError()
|
||||||
s.logger.Printf("[WARN] consul: Legacy ACL replication error (will retry if still leader): %v", err)
|
legacyACLLogger.Warn("Legacy ACL replication error (will retry if still leader)", "error", err)
|
||||||
} else {
|
} else {
|
||||||
lastRemoteIndex = index
|
lastRemoteIndex = index
|
||||||
s.updateACLReplicationStatusIndex(structs.ACLReplicateLegacy, index)
|
s.updateACLReplicationStatusIndex(structs.ACLReplicateLegacy, index)
|
||||||
s.logger.Printf("[DEBUG] consul: Legacy ACL replication completed through remote index %d", index)
|
legacyACLLogger.Debug("Legacy ACL replication completed through remote index", "index", index)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -794,7 +807,7 @@ func (s *Server) startLegacyACLReplication() {
|
||||||
s.initReplicationStatus()
|
s.initReplicationStatus()
|
||||||
|
|
||||||
s.leaderRoutineManager.Start(legacyACLReplicationRoutineName, s.runLegacyACLReplication)
|
s.leaderRoutineManager.Start(legacyACLReplicationRoutineName, s.runLegacyACLReplication)
|
||||||
s.logger.Printf("[INFO] acl: started legacy ACL replication")
|
s.logger.Info("started legacy ACL replication")
|
||||||
s.updateACLReplicationStatusRunning(structs.ACLReplicateLegacy)
|
s.updateACLReplicationStatusRunning(structs.ACLReplicateLegacy)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -822,32 +835,40 @@ func (s *Server) startACLReplication() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type replicateFunc func(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error)
type replicateFunc func(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error)
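The new signature threads a dedicated hclog.Logger through every replication routine. A minimal stand-in that satisfies it might look as follows; the name and behaviour are purely illustrative, since the real replicateACLPolicies/Roles/Tokens implementations are outside this section:

	// hypothetical no-op replicator matching the new replicateFunc signature
	var noopReplicate replicateFunc = func(ctx context.Context, logger hclog.Logger, lastRemoteIndex uint64) (uint64, bool, error) {
		logger.Debug("nothing to replicate", "last_remote_index", lastRemoteIndex)
		return lastRemoteIndex, false, nil
	}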
|
|
||||||
// This function is only intended to be run as a managed go routine, it will block until
|
// This function is only intended to be run as a managed go routine, it will block until
|
||||||
// the context passed in indicates that it should exit.
|
// the context passed in indicates that it should exit.
|
||||||
func (s *Server) runACLPolicyReplicator(ctx context.Context) error {
|
func (s *Server) runACLPolicyReplicator(ctx context.Context) error {
|
||||||
s.logger.Printf("[INFO] acl: started ACL Policy replication")
|
policyLogger := s.aclReplicationLogger(structs.ACLReplicatePolicies.SingularNoun())
|
||||||
|
policyLogger.Info("started ACL Policy replication")
|
||||||
return s.runACLReplicator(ctx, structs.ACLReplicatePolicies, s.replicateACLPolicies)
|
return s.runACLReplicator(ctx, policyLogger, structs.ACLReplicatePolicies, s.replicateACLPolicies)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function is only intended to be run as a managed go routine, it will block until
|
// This function is only intended to be run as a managed go routine, it will block until
|
||||||
// the context passed in indicates that it should exit.
|
// the context passed in indicates that it should exit.
|
||||||
func (s *Server) runACLRoleReplicator(ctx context.Context) error {
|
func (s *Server) runACLRoleReplicator(ctx context.Context) error {
|
||||||
s.logger.Printf("[INFO] acl: started ACL Role replication")
|
roleLogger := s.aclReplicationLogger(structs.ACLReplicateRoles.SingularNoun())
|
||||||
return s.runACLReplicator(ctx, structs.ACLReplicateRoles, s.replicateACLRoles)
|
roleLogger.Info("started ACL Role replication")
|
||||||
|
return s.runACLReplicator(ctx, roleLogger, structs.ACLReplicateRoles, s.replicateACLRoles)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function is only intended to be run as a managed go routine, it will block until
|
// This function is only intended to be run as a managed go routine, it will block until
|
||||||
// the context passed in indicates that it should exit.
|
// the context passed in indicates that it should exit.
|
||||||
func (s *Server) runACLTokenReplicator(ctx context.Context) error {
|
func (s *Server) runACLTokenReplicator(ctx context.Context) error {
|
||||||
return s.runACLReplicator(ctx, structs.ACLReplicateTokens, s.replicateACLTokens)
|
tokenLogger := s.aclReplicationLogger(structs.ACLReplicateTokens.SingularNoun())
|
||||||
|
tokenLogger.Info("started ACL Token replication")
|
||||||
|
return s.runACLReplicator(ctx, tokenLogger, structs.ACLReplicateTokens, s.replicateACLTokens)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function is only intended to be run as a managed go routine, it will block until
|
// This function is only intended to be run as a managed go routine, it will block until
|
||||||
// the context passed in indicates that it should exit.
|
// the context passed in indicates that it should exit.
|
||||||
func (s *Server) runACLReplicator(ctx context.Context, replicationType structs.ACLReplicationType, replicateFunc replicateFunc) error {
func (s *Server) runACLReplicator(
	ctx context.Context,
	logger hclog.Logger,
	replicationType structs.ACLReplicationType,
	replicateFunc replicateFunc,
) error {
var failedAttempts uint
|
var failedAttempts uint
|
||||||
limiter := rate.NewLimiter(rate.Limit(s.config.ACLReplicationRate), s.config.ACLReplicationBurst)
|
limiter := rate.NewLimiter(rate.Limit(s.config.ACLReplicationRate), s.config.ACLReplicationBurst)
|
||||||
|
|
||||||
|
@ -861,7 +882,7 @@ func (s *Server) runACLReplicator(ctx context.Context, replicationType structs.A
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
index, exit, err := replicateFunc(ctx, lastRemoteIndex)
|
index, exit, err := replicateFunc(ctx, logger, lastRemoteIndex)
|
||||||
if exit {
|
if exit {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -869,7 +890,9 @@ func (s *Server) runACLReplicator(ctx context.Context, replicationType structs.A
|
||||||
if err != nil {
|
if err != nil {
|
||||||
lastRemoteIndex = 0
|
lastRemoteIndex = 0
|
||||||
s.updateACLReplicationStatusError()
|
s.updateACLReplicationStatusError()
|
||||||
s.logger.Printf("[WARN] consul: ACL %s replication error (will retry if still leader): %v", replicationType.SingularNoun(), err)
|
logger.Warn("ACL replication error (will retry if still leader)",
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
if (1 << failedAttempts) < aclReplicationMaxRetryBackoff {
|
if (1 << failedAttempts) < aclReplicationMaxRetryBackoff {
|
||||||
failedAttempts++
|
failedAttempts++
|
||||||
}
|
}
|
||||||
|
@ -883,12 +906,21 @@ func (s *Server) runACLReplicator(ctx context.Context, replicationType structs.A
|
||||||
} else {
|
} else {
|
||||||
lastRemoteIndex = index
|
lastRemoteIndex = index
|
||||||
s.updateACLReplicationStatusIndex(replicationType, index)
|
s.updateACLReplicationStatusIndex(replicationType, index)
|
||||||
s.logger.Printf("[DEBUG] consul: ACL %s replication completed through remote index %d", replicationType.SingularNoun(), index)
|
logger.Debug("ACL replication completed through remote index",
|
||||||
|
"index", index,
|
||||||
|
)
|
||||||
failedAttempts = 0
|
failedAttempts = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||

func (s *Server) aclReplicationLogger(singularNoun string) hclog.Logger {
	return s.loggers.
		Named(logging.Replication).
		Named(logging.ACL).
		Named(singularNoun)
}

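For orientation, the chained Named calls above produce hierarchical, dot-separated logger names. A minimal, self-contained sketch of the effect, assuming the logging package constants are the plain strings "replication" and "acl" (not shown in this section):

	package main

	import "github.com/hashicorp/go-hclog"

	func main() {
		root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug})
		// roughly what aclReplicationLogger("policy") would hand back
		policyLogger := root.Named("replication").Named("acl").Named("policy")
		policyLogger.Debug("ACL replication completed through remote index", "index", 42)
		// output resembles:
		// [DEBUG] consul.replication.acl.policy: ACL replication completed through remote index: index=42
	}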
func (s *Server) stopACLReplication() {
|
func (s *Server) stopACLReplication() {
|
||||||
// these will be no-ops when not started
|
// these will be no-ops when not started
|
||||||
s.leaderRoutineManager.Stop(legacyACLReplicationRoutineName)
|
s.leaderRoutineManager.Stop(legacyACLReplicationRoutineName)
|
||||||
|
@ -913,10 +945,11 @@ func (s *Server) stopConfigReplication() {
|
||||||
|
|
||||||
// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary
|
// getOrCreateAutopilotConfig is used to get the autopilot config, initializing it if necessary
|
||||||
func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config {
|
func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config {
|
||||||
|
logger := s.loggers.Named(logging.Autopilot)
|
||||||
state := s.fsm.State()
|
state := s.fsm.State()
|
||||||
_, config, err := state.AutopilotConfig()
|
_, config, err := state.AutopilotConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] autopilot: failed to get config: %v", err)
|
logger.Error("failed to get config", "error", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if config != nil {
|
if config != nil {
|
||||||
|
@ -924,14 +957,14 @@ func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !ServersMeetMinimumVersion(s.LANMembers(), minAutopilotVersion) {
|
if !ServersMeetMinimumVersion(s.LANMembers(), minAutopilotVersion) {
|
||||||
s.logger.Printf("[WARN] autopilot: can't initialize until all servers are >= %s", minAutopilotVersion.String())
|
logger.Warn("can't initialize until all servers are >= " + minAutopilotVersion.String())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
config = s.config.AutopilotConfig
|
config = s.config.AutopilotConfig
|
||||||
req := structs.AutopilotSetConfigRequest{Config: *config}
|
req := structs.AutopilotSetConfigRequest{Config: *config}
|
||||||
if _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
|
if _, err = s.raftApply(structs.AutopilotRequestType, req); err != nil {
|
||||||
s.logger.Printf("[ERR] autopilot: failed to initialize config: %v", err)
|
logger.Error("failed to initialize config", "error", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -950,7 +983,9 @@ func (s *Server) bootstrapConfigEntries(entries []structs.ConfigEntry) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if !ServersMeetMinimumVersion(s.LANMembers(), minCentralizedConfigVersion) {
|
if !ServersMeetMinimumVersion(s.LANMembers(), minCentralizedConfigVersion) {
|
||||||
s.logger.Printf("[WARN] centralized config: can't initialize until all servers >= %s", minCentralizedConfigVersion.String())
|
s.loggers.
|
||||||
|
Named(logging.CentralConfig).
|
||||||
|
Warn("config: can't initialize until all servers >=" + minCentralizedConfigVersion.String())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1020,7 +1055,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
|
||||||
if service.ID == structs.ConsulServiceID {
|
if service.ID == structs.ConsulServiceID {
|
||||||
_, node, err := state.GetNode(check.Node)
|
_, node, err := state.GetNode(check.Node)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] consul: Unable to look up node with name %q: %v", check.Node, err)
|
s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err)
|
||||||
continue CHECKS
|
continue CHECKS
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1065,7 +1100,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}) error {
|
||||||
func (s *Server) reconcileMember(member serf.Member) error {
|
func (s *Server) reconcileMember(member serf.Member) error {
|
||||||
// Check if this is a member we should handle
|
// Check if this is a member we should handle
|
||||||
if !s.shouldHandleMember(member) {
|
if !s.shouldHandleMember(member) {
|
||||||
s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
|
s.logger.Warn("skipping reconcile of node", "member", member)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
|
defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
|
||||||
|
@ -1081,8 +1116,10 @@ func (s *Server) reconcileMember(member serf.Member) error {
|
||||||
err = s.handleReapMember(member)
|
err = s.handleReapMember(member)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to reconcile member: %v: %v",
|
s.logger.Error("failed to reconcile member",
|
||||||
member, err)
|
"member", member,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
|
|
||||||
// Permission denied should not bubble up
|
// Permission denied should not bubble up
|
||||||
if acl.IsErrPermissionDenied(err) {
|
if acl.IsErrPermissionDenied(err) {
|
||||||
|
@ -1172,7 +1209,7 @@ func (s *Server) handleAliveMember(member serf.Member) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
AFTER_CHECK:
|
AFTER_CHECK:
|
||||||
s.logger.Printf("[INFO] consul: member '%s' joined, marking health alive", member.Name)
|
s.logger.Info("member joined, marking health alive", "member", member.Name)
|
||||||
|
|
||||||
// Register with the catalog.
|
// Register with the catalog.
|
||||||
req := structs.RegisterRequest{
|
req := structs.RegisterRequest{
|
||||||
|
@ -1209,7 +1246,7 @@ func (s *Server) handleFailedMember(member serf.Member) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if node == nil {
|
if node == nil {
|
||||||
s.logger.Printf("[INFO] consul: ignoring failed event for member '%s' because it does not exist in the catalog", member.Name)
|
s.logger.Info("ignoring failed event for member because it does not exist in the catalog", "member", member.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1225,7 +1262,7 @@ func (s *Server) handleFailedMember(member serf.Member) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: member '%s' failed, marking health critical", member.Name)
|
s.logger.Info("member failed, marking health critical", "member", member.Name)
|
||||||
|
|
||||||
// Register with the catalog
|
// Register with the catalog
|
||||||
req := structs.RegisterRequest{
|
req := structs.RegisterRequest{
|
||||||
|
@ -1267,7 +1304,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member) error
|
||||||
// is leaving. Instead, we should allow a follower to take-over and
|
// is leaving. Instead, we should allow a follower to take-over and
|
||||||
// deregister us later.
|
// deregister us later.
|
||||||
if member.Name == s.config.NodeName {
|
if member.Name == s.config.NodeName {
|
||||||
s.logger.Printf("[WARN] consul: deregistering self (%s) should be done by follower", s.config.NodeName)
|
s.logger.Warn("deregistering self should be done by follower", "name", s.config.NodeName)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1289,7 +1326,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deregister the node
|
// Deregister the node
|
||||||
s.logger.Printf("[INFO] consul: member '%s' %s, deregistering", member.Name, reason)
|
s.logger.Info("deregistering member", "member", member.Name, "reason", reason)
|
||||||
req := structs.DeregisterRequest{
|
req := structs.DeregisterRequest{
|
||||||
Datacenter: s.config.Datacenter,
|
Datacenter: s.config.Datacenter,
|
||||||
Node: member.Name,
|
Node: member.Name,
|
||||||
|
@ -1306,7 +1343,10 @@ func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error {
|
||||||
for _, member := range members {
|
for _, member := range members {
|
||||||
valid, p := metadata.IsConsulServer(member)
|
valid, p := metadata.IsConsulServer(member)
|
||||||
if valid && member.Name != m.Name && p.Bootstrap {
|
if valid && member.Name != m.Name && p.Bootstrap {
|
||||||
s.logger.Printf("[ERR] consul: '%v' and '%v' are both in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.", m.Name, member.Name)
|
s.logger.Error("Two nodes are in bootstrap mode. Only one node should be in bootstrap mode, not adding Raft peer.",
|
||||||
|
"node_to_add", m.Name,
|
||||||
|
"other", member.Name,
|
||||||
|
)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1317,12 +1357,12 @@ func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error {
|
||||||
// safe to attempt if there are multiple servers available.
|
// safe to attempt if there are multiple servers available.
|
||||||
configFuture := s.raft.GetConfiguration()
|
configFuture := s.raft.GetConfiguration()
|
||||||
if err := configFuture.Error(); err != nil {
|
if err := configFuture.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to get raft configuration: %v", err)
|
s.logger.Error("failed to get raft configuration", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if m.Name == s.config.NodeName {
|
if m.Name == s.config.NodeName {
|
||||||
if l := len(configFuture.Configuration().Servers); l < 3 {
|
if l := len(configFuture.Configuration().Servers); l < 3 {
|
||||||
s.logger.Printf("[DEBUG] consul: Skipping self join check for %q since the cluster is too small", m.Name)
|
s.logger.Debug("Skipping self join check for node since the cluster is too small", "node", m.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1353,12 +1393,12 @@ func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error {
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
return fmt.Errorf("error removing server with duplicate address %q: %s", server.Address, err)
|
return fmt.Errorf("error removing server with duplicate address %q: %s", server.Address, err)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: removed server with duplicate address: %s", server.Address)
|
s.logger.Info("removed server with duplicate address", "address", server.Address)
|
||||||
} else {
|
} else {
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
return fmt.Errorf("error removing server with duplicate ID %q: %s", server.ID, err)
|
return fmt.Errorf("error removing server with duplicate ID %q: %s", server.ID, err)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: removed server with duplicate ID: %s", server.ID)
|
s.logger.Info("removed server with duplicate ID", "id", server.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1368,19 +1408,19 @@ func (s *Server) joinConsulServer(m serf.Member, parts *metadata.Server) error {
|
||||||
case minRaftProtocol >= 3:
|
case minRaftProtocol >= 3:
|
||||||
addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
addFuture := s.raft.AddNonvoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
||||||
if err := addFuture.Error(); err != nil {
|
if err := addFuture.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
|
s.logger.Error("failed to add raft peer", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
case minRaftProtocol == 2 && parts.RaftVersion >= 3:
|
case minRaftProtocol == 2 && parts.RaftVersion >= 3:
|
||||||
addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
addFuture := s.raft.AddVoter(raft.ServerID(parts.ID), raft.ServerAddress(addr), 0, 0)
|
||||||
if err := addFuture.Error(); err != nil {
|
if err := addFuture.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
|
s.logger.Error("failed to add raft peer", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
addFuture := s.raft.AddPeer(raft.ServerAddress(addr))
|
addFuture := s.raft.AddPeer(raft.ServerAddress(addr))
|
||||||
if err := addFuture.Error(); err != nil {
|
if err := addFuture.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to add raft peer: %v", err)
|
s.logger.Error("failed to add raft peer", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1400,7 +1440,7 @@ func (s *Server) removeConsulServer(m serf.Member, port int) error {
|
||||||
// log entries.
|
// log entries.
|
||||||
configFuture := s.raft.GetConfiguration()
|
configFuture := s.raft.GetConfiguration()
|
||||||
if err := configFuture.Error(); err != nil {
|
if err := configFuture.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to get raft configuration: %v", err)
|
s.logger.Error("failed to get raft configuration", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1415,21 +1455,25 @@ func (s *Server) removeConsulServer(m serf.Member, port int) error {
|
||||||
for _, server := range configFuture.Configuration().Servers {
|
for _, server := range configFuture.Configuration().Servers {
|
||||||
// If we understand the new add/remove APIs and the server was added by ID, use the new remove API
|
// If we understand the new add/remove APIs and the server was added by ID, use the new remove API
|
||||||
if minRaftProtocol >= 2 && server.ID == raft.ServerID(parts.ID) {
|
if minRaftProtocol >= 2 && server.ID == raft.ServerID(parts.ID) {
|
||||||
s.logger.Printf("[INFO] consul: removing server by ID: %q", server.ID)
|
s.logger.Info("removing server by ID", "id", server.ID)
|
||||||
future := s.raft.RemoveServer(raft.ServerID(parts.ID), 0, 0)
|
future := s.raft.RemoveServer(raft.ServerID(parts.ID), 0, 0)
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to remove raft peer '%v': %v",
|
s.logger.Error("failed to remove raft peer",
|
||||||
server.ID, err)
|
"id", server.ID,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
} else if server.Address == raft.ServerAddress(addr) {
|
} else if server.Address == raft.ServerAddress(addr) {
|
||||||
// If not, use the old remove API
|
// If not, use the old remove API
|
||||||
s.logger.Printf("[INFO] consul: removing server by address: %q", server.Address)
|
s.logger.Info("removing server by address", "address", server.Address)
|
||||||
future := s.raft.RemovePeer(raft.ServerAddress(addr))
|
future := s.raft.RemovePeer(raft.ServerAddress(addr))
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to remove raft peer '%v': %v",
|
s.logger.Error("failed to remove raft peer",
|
||||||
addr, err)
|
"address", addr,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
@ -1454,7 +1498,9 @@ func (s *Server) reapTombstones(index uint64) {
|
||||||
}
|
}
|
||||||
_, err := s.raftApply(structs.TombstoneRequestType, &req)
|
_, err := s.raftApply(structs.TombstoneRequestType, &req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] consul: failed to reap tombstones up to %d: %v",
|
s.logger.Error("failed to reap tombstones up to index",
|
||||||
index, err)
|
"index", index,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,6 +13,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/connect"
|
"github.com/hashicorp/consul/agent/connect"
|
||||||
"github.com/hashicorp/consul/agent/connect/ca"
|
"github.com/hashicorp/consul/agent/connect/ca"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
uuid "github.com/hashicorp/go-uuid"
|
uuid "github.com/hashicorp/go-uuid"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -163,6 +164,7 @@ func (s *Server) setCAProvider(newProvider ca.Provider, root *structs.CARoot) {
|
||||||
// the CA if this is the primary DC or making a remote RPC for intermediate signing
|
// the CA if this is the primary DC or making a remote RPC for intermediate signing
|
||||||
// if this is a secondary DC.
|
// if this is a secondary DC.
|
||||||
func (s *Server) initializeCA() error {
|
func (s *Server) initializeCA() error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
// Bail if connect isn't enabled.
|
// Bail if connect isn't enabled.
|
||||||
if !s.config.ConnectEnabled {
|
if !s.config.ConnectEnabled {
|
||||||
return nil
|
return nil
|
||||||
|
@ -186,12 +188,14 @@ func (s *Server) initializeCA() error {
|
||||||
if s.config.PrimaryDatacenter != s.config.Datacenter {
|
if s.config.PrimaryDatacenter != s.config.Datacenter {
|
||||||
versionOk, foundPrimary := ServersInDCMeetMinimumVersion(s.WANMembers(), s.config.PrimaryDatacenter, minMultiDCConnectVersion)
|
versionOk, foundPrimary := ServersInDCMeetMinimumVersion(s.WANMembers(), s.config.PrimaryDatacenter, minMultiDCConnectVersion)
|
||||||
if !foundPrimary {
|
if !foundPrimary {
|
||||||
s.logger.Printf("[WARN] connect: primary datacenter is configured but unreachable - deferring initialization of the secondary datacenter CA")
|
connectLogger.Warn("primary datacenter is configured but unreachable - deferring initialization of the secondary datacenter CA")
|
||||||
// return nil because we will initialize the secondary CA later
|
// return nil because we will initialize the secondary CA later
|
||||||
return nil
|
return nil
|
||||||
} else if !versionOk {
|
} else if !versionOk {
|
||||||
// return nil because we will initialize the secondary CA later
|
// return nil because we will initialize the secondary CA later
|
||||||
s.logger.Printf("[WARN] connect: servers in the primary datacenter are not at least at version %s - deferring initialization of the secondary datacenter CA", minMultiDCConnectVersion)
|
connectLogger.Warn("servers in the primary datacenter are not at least at the minimum version - deferring initialization of the secondary datacenter CA",
|
||||||
|
"min_version", minMultiDCConnectVersion.String(),
|
||||||
|
)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -212,7 +216,7 @@ func (s *Server) initializeCA() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[INFO] connect: initialized secondary datacenter CA with provider %q", conf.Provider)
|
connectLogger.Info("initialized secondary datacenter CA with provider", "provider", conf.Provider)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -223,6 +227,7 @@ func (s *Server) initializeCA() error {
|
||||||
// It is being called while holding caProviderReconfigurationLock
|
// It is being called while holding caProviderReconfigurationLock
|
||||||
// which means it must never take that lock itself or call anything that does.
|
// which means it must never take that lock itself or call anything that does.
|
||||||
func (s *Server) initializeRootCA(provider ca.Provider, conf *structs.CAConfiguration) error {
|
func (s *Server) initializeRootCA(provider ca.Provider, conf *structs.CAConfiguration) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
pCfg := ca.ProviderConfig{
|
pCfg := ca.ProviderConfig{
|
||||||
ClusterID: conf.ClusterID,
|
ClusterID: conf.ClusterID,
|
||||||
Datacenter: s.config.Datacenter,
|
Datacenter: s.config.Datacenter,
|
||||||
|
@ -311,7 +316,7 @@ func (s *Server) initializeRootCA(provider ca.Provider, conf *structs.CAConfigur
|
||||||
Roots: []*structs.CARoot{rootCA},
|
Roots: []*structs.CARoot{rootCA},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] connect: Apply failed %v", err)
|
connectLogger.Error("Raft apply failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if respErr, ok := resp.(error); ok {
|
if respErr, ok := resp.(error); ok {
|
||||||
|
@ -320,7 +325,7 @@ func (s *Server) initializeRootCA(provider ca.Provider, conf *structs.CAConfigur
|
||||||
|
|
||||||
s.setCAProvider(provider, rootCA)
|
s.setCAProvider(provider, rootCA)
|
||||||
|
|
||||||
s.logger.Printf("[INFO] connect: initialized primary datacenter CA with provider %q", conf.Provider)
|
connectLogger.Info("initialized primary datacenter CA with provider", "provider", conf.Provider)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -438,6 +443,7 @@ func (s *Server) initializeSecondaryCA(provider ca.Provider, primaryRoots struct
|
||||||
// persistNewRoot is being called while holding caProviderReconfigurationLock
|
// persistNewRoot is being called while holding caProviderReconfigurationLock
|
||||||
// which means it must never take that lock itself or call anything that does.
|
// which means it must never take that lock itself or call anything that does.
|
||||||
func (s *Server) persistNewRoot(provider ca.Provider, newActiveRoot *structs.CARoot) error {
|
func (s *Server) persistNewRoot(provider ca.Provider, newActiveRoot *structs.CARoot) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
state := s.fsm.State()
|
state := s.fsm.State()
|
||||||
idx, oldRoots, err := state.CARoots(nil)
|
idx, oldRoots, err := state.CARoots(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -493,13 +499,14 @@ func (s *Server) persistNewRoot(provider ca.Provider, newActiveRoot *structs.CAR
|
||||||
return fmt.Errorf("could not atomically update roots and config")
|
return fmt.Errorf("could not atomically update roots and config")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[INFO] connect: updated root certificates from primary datacenter")
|
connectLogger.Info("updated root certificates from primary datacenter")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getIntermediateCASigned is being called while holding caProviderReconfigurationLock
|
// getIntermediateCASigned is being called while holding caProviderReconfigurationLock
|
||||||
// which means it must never take that lock itself or call anything that does.
|
// which means it must never take that lock itself or call anything that does.
|
||||||
func (s *Server) getIntermediateCASigned(provider ca.Provider, newActiveRoot *structs.CARoot) error {
|
func (s *Server) getIntermediateCASigned(provider ca.Provider, newActiveRoot *structs.CARoot) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
csr, err := provider.GenerateIntermediateCSR()
|
csr, err := provider.GenerateIntermediateCSR()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -508,7 +515,7 @@ func (s *Server) getIntermediateCASigned(provider ca.Provider, newActiveRoot *st
|
||||||
var intermediatePEM string
|
var intermediatePEM string
|
||||||
if err := s.forwardDC("ConnectCA.SignIntermediate", s.config.PrimaryDatacenter, s.generateCASignRequest(csr), &intermediatePEM); err != nil {
|
if err := s.forwardDC("ConnectCA.SignIntermediate", s.config.PrimaryDatacenter, s.generateCASignRequest(csr), &intermediatePEM); err != nil {
|
||||||
// this is a failure in the primary and shouldn't be capable of erroring out our establishing leadership
|
// this is a failure in the primary and shouldn't be capable of erroring out our establishing leadership
|
||||||
s.logger.Printf("[WARN] connect: Primary datacenter refused to sign our intermediate CA certificate: %v", err)
|
connectLogger.Warn("Primary datacenter refused to sign our intermediate CA certificate", "error", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -526,7 +533,7 @@ func (s *Server) getIntermediateCASigned(provider ca.Provider, newActiveRoot *st
|
||||||
newActiveRoot.IntermediateCerts = append(newActiveRoot.IntermediateCerts, intermediatePEM)
|
newActiveRoot.IntermediateCerts = append(newActiveRoot.IntermediateCerts, intermediatePEM)
|
||||||
newActiveRoot.SigningKeyID = connect.EncodeSigningKeyID(intermediateCert.SubjectKeyId)
|
newActiveRoot.SigningKeyID = connect.EncodeSigningKeyID(intermediateCert.SubjectKeyId)
|
||||||
|
|
||||||
s.logger.Printf("[INFO] connect: received new intermediate certificate from primary datacenter")
|
connectLogger.Info("received new intermediate certificate from primary datacenter")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -567,7 +574,7 @@ func (s *Server) runCARootPruning(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
if err := s.pruneCARoots(); err != nil {
|
if err := s.pruneCARoots(); err != nil {
|
||||||
s.logger.Printf("[ERR] connect: error pruning CA roots: %v", err)
|
s.loggers.Named(logging.Connect).Error("error pruning CA roots", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -598,7 +605,7 @@ func (s *Server) pruneCARoots() error {
|
||||||
var newRoots structs.CARoots
|
var newRoots structs.CARoots
|
||||||
for _, r := range roots {
|
for _, r := range roots {
|
||||||
if !r.Active && !r.RotatedOutAt.IsZero() && time.Now().Sub(r.RotatedOutAt) > common.LeafCertTTL*2 {
|
if !r.Active && !r.RotatedOutAt.IsZero() && time.Now().Sub(r.RotatedOutAt) > common.LeafCertTTL*2 {
|
||||||
s.logger.Printf("[INFO] connect: pruning old unused root CA (ID: %s)", r.ID)
|
s.loggers.Named(logging.Connect).Info("pruning old unused root CA", "id", r.ID)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
newRoot := *r
|
newRoot := *r
|
||||||
|
@ -630,6 +637,8 @@ func (s *Server) pruneCARoots() error {
|
||||||
// expiration. As soon as more than half the time a cert is valid has passed,
|
// expiration. As soon as more than half the time a cert is valid has passed,
|
||||||
// it will try to renew it.
|
// it will try to renew it.
|
||||||
func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) error {
|
func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
@ -684,7 +693,10 @@ func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) erro
|
||||||
s.setCAProvider(provider, activeRoot)
|
s.setCAProvider(provider, activeRoot)
|
||||||
return nil
|
return nil
|
||||||
}, func(err error) {
|
}, func(err error) {
|
||||||
s.logger.Printf("[ERR] connect: %s: %v", secondaryCertRenewWatchRoutineName, err)
|
connectLogger.Error("error renewing intermediate certs",
|
||||||
|
"routine", secondaryCertRenewWatchRoutineName,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -694,6 +706,7 @@ func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) erro
|
||||||
// ConnectCA.Roots endpoint to monitor when it needs to request a new signed
|
// ConnectCA.Roots endpoint to monitor when it needs to request a new signed
|
||||||
// intermediate certificate.
|
// intermediate certificate.
|
||||||
func (s *Server) secondaryCARootWatch(ctx context.Context) error {
|
func (s *Server) secondaryCARootWatch(ctx context.Context) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
args := structs.DCSpecificRequest{
|
args := structs.DCSpecificRequest{
|
||||||
Datacenter: s.config.PrimaryDatacenter,
|
Datacenter: s.config.PrimaryDatacenter,
|
||||||
QueryOptions: structs.QueryOptions{
|
QueryOptions: structs.QueryOptions{
|
||||||
|
@ -702,7 +715,7 @@ func (s *Server) secondaryCARootWatch(ctx context.Context) error {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[DEBUG] connect: starting Connect CA root replication from primary datacenter %q", s.config.PrimaryDatacenter)
|
connectLogger.Debug("starting Connect CA root replication from primary datacenter", "primary", s.config.PrimaryDatacenter)
|
||||||
|
|
||||||
retryLoopBackoff(ctx.Done(), func() error {
|
retryLoopBackoff(ctx.Done(), func() error {
|
||||||
var roots structs.IndexedCARoots
|
var roots structs.IndexedCARoots
|
||||||
|
@ -741,7 +754,10 @@ func (s *Server) secondaryCARootWatch(ctx context.Context) error {
|
||||||
args.QueryOptions.MinQueryIndex = nextIndexVal(args.QueryOptions.MinQueryIndex, roots.QueryMeta.Index)
|
args.QueryOptions.MinQueryIndex = nextIndexVal(args.QueryOptions.MinQueryIndex, roots.QueryMeta.Index)
|
||||||
return nil
|
return nil
|
||||||
}, func(err error) {
|
}, func(err error) {
|
||||||
s.logger.Printf("[ERR] connect: %s: %v", secondaryCARootWatchRoutineName, err)
|
connectLogger.Error("CA root replication failed, will retry",
|
||||||
|
"routine", secondaryCARootWatchRoutineName,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
})
|
})
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -750,11 +766,12 @@ func (s *Server) secondaryCARootWatch(ctx context.Context) error {
|
||||||
// replicateIntentions executes a blocking query to the primary datacenter to replicate
|
// replicateIntentions executes a blocking query to the primary datacenter to replicate
|
||||||
// the intentions there to the local state.
|
// the intentions there to the local state.
|
||||||
func (s *Server) replicateIntentions(ctx context.Context) error {
|
func (s *Server) replicateIntentions(ctx context.Context) error {
|
||||||
|
connectLogger := s.loggers.Named(logging.Connect)
|
||||||
args := structs.DCSpecificRequest{
|
args := structs.DCSpecificRequest{
|
||||||
Datacenter: s.config.PrimaryDatacenter,
|
Datacenter: s.config.PrimaryDatacenter,
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[DEBUG] connect: starting Connect intention replication from primary datacenter %q", s.config.PrimaryDatacenter)
|
connectLogger.Debug("starting Connect intention replication from primary datacenter", "primary", s.config.PrimaryDatacenter)
|
||||||
|
|
||||||
retryLoopBackoff(ctx.Done(), func() error {
|
retryLoopBackoff(ctx.Done(), func() error {
|
||||||
// Always use the latest replication token value in case it changed while looping.
|
// Always use the latest replication token value in case it changed while looping.
|
||||||
|
@ -798,7 +815,10 @@ func (s *Server) replicateIntentions(ctx context.Context) error {
|
||||||
args.QueryOptions.MinQueryIndex = nextIndexVal(args.QueryOptions.MinQueryIndex, remote.QueryMeta.Index)
|
args.QueryOptions.MinQueryIndex = nextIndexVal(args.QueryOptions.MinQueryIndex, remote.QueryMeta.Index)
|
||||||
return nil
|
return nil
|
||||||
}, func(err error) {
|
}, func(err error) {
|
||||||
s.logger.Printf("[ERR] connect: %s: %v", intentionReplicationRoutineName, err)
|
connectLogger.Error("error replicating intentions",
|
||||||
|
"routine", intentionReplicationRoutineName,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
})
|
})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,9 +2,11 @@ package consul
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
type LeaderRoutine func(ctx context.Context) error
|
type LeaderRoutine func(ctx context.Context) error
|
||||||
|
@ -16,18 +18,20 @@ type leaderRoutine struct {
|
||||||
|
|
||||||
type LeaderRoutineManager struct {
|
type LeaderRoutineManager struct {
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
logger *log.Logger
|
logger hclog.Logger
|
||||||
|
|
||||||
routines map[string]*leaderRoutine
|
routines map[string]*leaderRoutine
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewLeaderRoutineManager(logger *log.Logger) *LeaderRoutineManager {
func NewLeaderRoutineManager(logger hclog.Logger) *LeaderRoutineManager {
	if logger == nil {
	if logger == nil {
		logger = log.New(os.Stderr, "", log.LstdFlags)
		logger = hclog.New(&hclog.LoggerOptions{
			Output: os.Stderr,
		})
	}
	}

	return &LeaderRoutineManager{
	return &LeaderRoutineManager{
		logger:   logger,
		logger:   logger.Named(logging.Leader),
		routines: make(map[string]*leaderRoutine),
		routines: make(map[string]*leaderRoutine),
	}
	}
}
}
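A hedged usage sketch of the constructor and routine lifecycle, inside the consul package; the Start and Stop signatures are taken from the call sites visible elsewhere in this diff:

	logger := hclog.New(&hclog.LoggerOptions{Name: "consul"})
	mgr := NewLeaderRoutineManager(logger)
	_ = mgr.Start("example", func(ctx context.Context) error {
		<-ctx.Done() // block until the manager cancels the routine
		return nil
	})
	_ = mgr.Stop("example")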
@ -68,9 +72,12 @@ func (m *LeaderRoutineManager) StartWithContext(parentCtx context.Context, name
|
||||||
go func() {
|
go func() {
|
||||||
err := routine(ctx)
|
err := routine(ctx)
|
||||||
if err != nil && err != context.DeadlineExceeded && err != context.Canceled {
|
if err != nil && err != context.DeadlineExceeded && err != context.Canceled {
|
||||||
m.logger.Printf("[ERROR] leader: %s routine exited with error: %v", name, err)
|
m.logger.Error("routine exited with error",
|
||||||
|
"routine", name,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
m.logger.Printf("[DEBUG] leader: stopped %s routine", name)
|
m.logger.Debug("stopped routine", "routine", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
|
@ -79,7 +86,7 @@ func (m *LeaderRoutineManager) StartWithContext(parentCtx context.Context, name
|
||||||
}()
|
}()
|
||||||
|
|
||||||
m.routines[name] = instance
|
m.routines[name] = instance
|
||||||
m.logger.Printf("[INFO] leader: started %s routine", name)
|
m.logger.Info("started routine", "routine", name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -97,7 +104,7 @@ func (m *LeaderRoutineManager) Stop(name string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.logger.Printf("[DEBUG] leader: stopping %s routine", name)
|
m.logger.Debug("stopping routine", "routine", name)
|
||||||
instance.cancel()
|
instance.cancel()
|
||||||
delete(m.routines, name)
|
delete(m.routines, name)
|
||||||
return nil
|
return nil
|
||||||
|
@ -111,7 +118,7 @@ func (m *LeaderRoutineManager) StopAll() {
|
||||||
if !routine.running {
|
if !routine.running {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
m.logger.Printf("[DEBUG] leader: stopping %s routine", name)
|
m.logger.Debug("stopping routine", "routine", name)
|
||||||
routine.cancel()
|
routine.cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -16,7 +16,7 @@ func TestLeaderRoutineManager(t *testing.T) {
|
||||||
var running uint32
|
var running uint32
|
||||||
// tlog := testutil.NewCancellableTestLogger(t)
|
// tlog := testutil.NewCancellableTestLogger(t)
|
||||||
// defer tlog.Cancel()
|
// defer tlog.Cancel()
|
||||||
mgr := NewLeaderRoutineManager(testutil.TestLogger(t))
|
mgr := NewLeaderRoutineManager(testutil.Logger(t))
|
||||||
|
|
||||||
run := func(ctx context.Context) error {
|
run := func(ctx context.Context) error {
|
||||||
atomic.StoreUint32(&running, 1)
|
atomic.StoreUint32(&running, 1)
|
||||||
|
|
|
@ -1177,11 +1177,11 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
|
||||||
for scan.Scan() {
|
for scan.Scan() {
|
||||||
line := scan.Text()
|
line := scan.Text()
|
||||||
|
|
||||||
if strings.Contains(line, "consul: failed to establish leadership") {
|
if strings.Contains(line, "failed to establish leadership") {
|
||||||
ch <- ""
|
ch <- ""
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if strings.Contains(line, "connect: initialized primary datacenter") {
|
if strings.Contains(line, "initialized primary datacenter") {
|
||||||
ch <- "leadership should not have gotten here if config entries properly failed"
|
ch <- "leadership should not have gotten here if config entries properly failed"
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@@ -0,0 +1,31 @@
package consul

import (
	"sync"

	"github.com/hashicorp/go-hclog"
)

type loggerStore struct {
	root  hclog.Logger
	l     sync.Mutex
	cache map[string]hclog.Logger
}

func newLoggerStore(root hclog.Logger) *loggerStore {
	return &loggerStore{
		root:  root,
		cache: make(map[string]hclog.Logger),
	}
}

func (ls *loggerStore) Named(name string) hclog.Logger {
	ls.l.Lock()
	defer ls.l.Unlock()
	l, ok := ls.cache[name]
	if !ok {
		l = ls.root.Named(name)
		ls.cache[name] = l
	}
	return l
}
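The cache matters because hclog's Named returns a fresh sub-logger on each call; without the store, repeated lookups for the same subsystem would allocate distinct instances. A hedged sketch of the intended behaviour (the unit tests that follow check the same thing):

	root := hclog.New(&hclog.LoggerOptions{Name: "consul"})
	store := newLoggerStore(root)
	a := store.Named("connect")
	b := store.Named("connect")
	// a == b: the store hands back the cached sub-logger,
	// while calling root.Named("connect") twice would yield two separate instances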
@@ -0,0 +1,44 @@
package consul

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/stretchr/testify/require"
)

func TestLoggerStore_Named(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	logger := testutil.Logger(t)
	store := newLoggerStore(logger)
	require.NotNil(store)

	l1 := store.Named("test1")
	l2 := store.Named("test2")
	require.Truef(
		l1 != l2,
		"expected %p and %p to have a different memory address",
		l1,
		l2,
	)
}

func TestLoggerStore_NamedCache(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	logger := testutil.Logger(t)
	store := newLoggerStore(logger)
	require.NotNil(store)

	l1 := store.Named("test")
	l2 := store.Named("test")
	require.Truef(
		l1 == l2,
		"expected %p and %p to have the same memory address",
		l1,
		l2,
	)
}
@ -55,7 +55,7 @@ func (op *Operator) AutopilotSetConfiguration(args *structs.AutopilotSetConfigRe
|
||||||
// Apply the update
|
// Apply the update
|
||||||
resp, err := op.srv.raftApply(structs.AutopilotRequestType, args)
|
resp, err := op.srv.raftApply(structs.AutopilotRequestType, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
op.srv.logger.Printf("[ERR] consul.operator: Apply failed: %v", err)
|
op.logger.Error("Raft apply failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if respErr, ok := resp.(error); ok {
|
if respErr, ok := resp.(error); ok {
|
||||||
|
|
|
@@ -1,6 +1,9 @@
package consul
package consul

import "github.com/hashicorp/go-hclog"

// Operator endpoint is used to perform low-level operator tasks for Consul.
// Operator endpoint is used to perform low-level operator tasks for Consul.
type Operator struct {
type Operator struct {
	srv *Server
	srv *Server
	logger hclog.Logger
}
}

@ -127,12 +127,14 @@ REMOVE:
|
||||||
future = op.srv.raft.RemovePeer(args.Address)
|
future = op.srv.raft.RemovePeer(args.Address)
|
||||||
}
|
}
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
op.srv.logger.Printf("[WARN] consul.operator: Failed to remove Raft peer %q: %v",
|
op.logger.Warn("Failed to remove Raft peer",
|
||||||
args.Address, err)
|
"peer", args.Address,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
op.srv.logger.Printf("[WARN] consul.operator: Removed Raft peer %q", args.Address)
|
op.logger.Warn("Removed Raft peer", "peer", args.Address)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -194,11 +196,13 @@ REMOVE:
|
||||||
future = op.srv.raft.RemovePeer(args.Address)
|
future = op.srv.raft.RemovePeer(args.Address)
|
||||||
}
|
}
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
op.srv.logger.Printf("[WARN] consul.operator: Failed to remove Raft peer with id %q: %v",
|
op.logger.Warn("Failed to remove Raft peer with id",
|
||||||
args.ID, err)
|
"peer_id", args.ID,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
op.srv.logger.Printf("[WARN] consul.operator: Removed Raft peer with id %q", args.ID)
|
op.logger.Warn("Removed Raft peer with id", "peer_id", args.ID)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,6 @@ package consul
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -11,6 +10,8 @@ import (
|
||||||
"github.com/hashicorp/consul/acl"
|
"github.com/hashicorp/consul/acl"
|
||||||
"github.com/hashicorp/consul/agent/consul/state"
|
"github.com/hashicorp/consul/agent/consul/state"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
"github.com/hashicorp/go-memdb"
|
"github.com/hashicorp/go-memdb"
|
||||||
"github.com/hashicorp/go-uuid"
|
"github.com/hashicorp/go-uuid"
|
||||||
)
|
)
|
||||||
|
@ -22,7 +23,8 @@ var (
|
||||||
|
|
||||||
// PreparedQuery manages the prepared query endpoint.
|
// PreparedQuery manages the prepared query endpoint.
|
||||||
type PreparedQuery struct {
|
type PreparedQuery struct {
|
||||||
srv *Server
|
srv *Server
|
||||||
|
logger hclog.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply is used to apply a modifying request to the data store. This should
|
// Apply is used to apply a modifying request to the data store. This should
|
||||||
|
@ -70,7 +72,7 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
// proposing.
if prefix, ok := args.Query.GetACLPrefix(); ok {
if rule != nil && rule.PreparedQueryWrite(prefix, nil) != acl.Allow {
p.srv.logger.Printf("[WARN] consul.prepared_query: Operation on prepared query '%s' denied due to ACLs", args.Query.ID)
p.logger.Warn("Operation on prepared query denied due to ACLs", "query", args.Query.ID)
return acl.ErrPermissionDenied
}
}
@@ -90,7 +92,7 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
if prefix, ok := query.GetACLPrefix(); ok {
if rule != nil && rule.PreparedQueryWrite(prefix, nil) != acl.Allow {
p.srv.logger.Printf("[WARN] consul.prepared_query: Operation on prepared query '%s' denied due to ACLs", args.Query.ID)
p.logger.Warn("Operation on prepared query denied due to ACLs", "query", args.Query.ID)
return acl.ErrPermissionDenied
}
}
@@ -114,7 +116,7 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
// Commit the query to the state store.
resp, err := p.srv.raftApply(structs.PreparedQueryRequestType, args)
if err != nil {
p.srv.logger.Printf("[ERR] consul.prepared_query: Apply failed %v", err)
p.logger.Error("Raft apply failed", "error", err)
return err
}
if respErr, ok := resp.(error); ok {
@@ -249,7 +251,7 @@ func (p *PreparedQuery) Get(args *structs.PreparedQuerySpecificRequest,
// prevented us from returning something that exists,
// then alert the user with a permission denied error.
if len(reply.Queries) == 0 {
p.srv.logger.Printf("[WARN] consul.prepared_query: Request to get prepared query '%s' denied due to ACLs", args.QueryID)
p.logger.Warn("Request to get prepared query denied due to ACLs", "query", args.QueryID)
return acl.ErrPermissionDenied
}

@@ -317,7 +319,7 @@ func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,

// If the query was filtered out, return an error.
if len(queries.Queries) == 0 {
p.srv.logger.Printf("[WARN] consul.prepared_query: Explain on prepared query '%s' denied due to ACLs", query.ID)
p.logger.Warn("Explain on prepared query denied due to ACLs", "query", query.ID)
return acl.ErrPermissionDenied
}

@@ -404,7 +406,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
}
}
} else {
p.srv.logger.Printf("[WARN] Prepared Query using near=_ip requires " +
p.logger.Warn("Prepared Query using near=_ip requires " +
"the source IP to be set but none was provided. No distance " +
"sorting will be done.")

@@ -633,7 +635,7 @@ func serviceMetaFilter(filters map[string]string, nodes structs.CheckServiceNode

// queryServer is a wrapper that makes it easier to test the failover logic.
type queryServer interface {
GetLogger() *log.Logger
GetLogger() hclog.Logger
GetOtherDatacentersByDistance() ([]string, error)
ForwardDC(method, dc string, args interface{}, reply interface{}) error
}

@@ -644,8 +646,8 @@ type queryServerWrapper struct {
}

// GetLogger returns the server's logger.
func (q *queryServerWrapper) GetLogger() *log.Logger {
func (q *queryServerWrapper) GetLogger() hclog.Logger {
return q.srv.logger
return q.srv.loggers.Named(logging.PreparedQuery)
}

// GetOtherDatacentersByDistance calls into the server's fn and filters out the

@@ -711,7 +713,7 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
// This will prevent a log of other log spammage if we do not
// attempt to talk to datacenters we don't know about.
if _, ok := known[dc]; !ok {
q.GetLogger().Printf("[DEBUG] consul.prepared_query: Skipping unknown datacenter '%s' in prepared query", dc)
q.GetLogger().Debug("Skipping unknown datacenter in prepared query", "datacenter", dc)
continue
}

@@ -748,7 +750,11 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
Connect: args.Connect,
}
if err := q.ForwardDC("PreparedQuery.ExecuteRemote", dc, remote, reply); err != nil {
q.GetLogger().Printf("[WARN] consul.prepared_query: Failed querying for service '%s' in datacenter '%s': %s", query.Service.Service, dc, err)
q.GetLogger().Warn("Failed querying for service in datacenter",
"service", query.Service.Service,
"datacenter", dc,
"error", err,
)
continue
}
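The changes above all follow one pattern: a Printf call that folded the severity tag and the variable data into a single format string becomes a leveled hclog call whose variable data travels as key/value pairs. A minimal runnable sketch of the two styles side by side; the logger name and query ID are illustrative values, not taken from this diff:

package main

import (
	"log"
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	queryID := "example-query" // illustrative value

	// Old style: the level and the data are baked into one formatted string.
	std := log.New(os.Stderr, "", log.LstdFlags)
	std.Printf("[WARN] consul.prepared_query: Operation on prepared query '%s' denied due to ACLs", queryID)

	// New style: the level is a method, the message stays constant, and the
	// variable data is passed as key/value pairs that survive JSON encoding.
	hc := hclog.New(&hclog.LoggerOptions{
		Name:  "consul.prepared_query",
		Level: hclog.Debug,
	})
	hc.Warn("Operation on prepared query denied due to ACLs", "query", queryID)
}

Because the message text is constant and the data is structured, downstream tooling can filter on the "query" field regardless of whether the output format is text or JSON.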
@@ -3,7 +3,6 @@ package consul
import (
"bytes"
"fmt"
"log"
"net/rpc"
"os"
"reflect"
@@ -19,7 +18,8 @@ import (
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/go-hclog"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/coordinate"
"github.com/stretchr/testify/require"
)
@@ -1464,14 +1464,15 @@ func TestPreparedQuery_Execute(t *testing.T) {

s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s2.RPC, "dc2")

// Try to WAN join.
joinWAN(t, s2, s1)
retry.Run(t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
testrpc.WaitForLeader(t, s2.RPC, "dc2")

// Create an ACL with read permission to the service.
var execToken string
@@ -2960,16 +2961,17 @@ func TestPreparedQuery_Wrapper(t *testing.T) {

s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
testrpc.WaitForLeader(t, s2.RPC, "dc2")

// Try to WAN join.
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s2.RPC, "dc2")

// Try all the operations on a real server via the wrapper.
wrapper := &queryServerWrapper{s1}
wrapper.GetLogger().Printf("[DEBUG] Test")
wrapper.GetLogger().Debug("Test")

ret, err := wrapper.GetOtherDatacentersByDistance()
wrapper.GetLogger().Println("Returned value: ", ret)
wrapper.GetLogger().Info("Returned value", "value", ret)
if err != nil {
t.Fatalf("err: %v", err)
}
@@ -2988,7 +2990,7 @@ type mockQueryServer struct {
DatacentersError error
QueryLog []string
QueryFn func(dc string, args interface{}, reply interface{}) error
Logger *log.Logger
Logger hclog.Logger
LogBuffer *bytes.Buffer
}

@@ -2996,10 +2998,15 @@ func (m *mockQueryServer) JoinQueryLog() string {
return strings.Join(m.QueryLog, "|")
}

func (m *mockQueryServer) GetLogger() *log.Logger {
func (m *mockQueryServer) GetLogger() hclog.Logger {
if m.Logger == nil {
m.LogBuffer = new(bytes.Buffer)
m.Logger = log.New(m.LogBuffer, "", 0)
m.Logger = hclog.New(&hclog.LoggerOptions{
Name: "mock_query",
Output: m.LogBuffer,
Level: hclog.Debug,
})
}
return m.Logger
}
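mockQueryServer builds its hclog logger over an in-memory buffer so the test can inspect what was logged. A small self-contained sketch of that technique; the message and field here are assumed purely for illustration:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Point the logger's output at a buffer instead of stderr, the same
	// trick mockQueryServer uses so a test can assert on what was logged.
	var buf bytes.Buffer
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "mock_query",
		Output: &buf,
		Level:  hclog.Debug,
	})

	logger.Debug("Skipping unknown datacenter in prepared query", "datacenter", "dc9")

	// hclog's text format emits key=value pairs, so the field is easy to find.
	if strings.Contains(buf.String(), "datacenter=dc9") {
		fmt.Println("found the structured field in the captured output")
	}
}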
@@ -3,13 +3,13 @@ package consul
import (
"context"
"fmt"
"log"
"os"
"sync/atomic"
"time"

metrics "github.com/armon/go-metrics"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
"golang.org/x/time/rate"
)

@@ -22,7 +22,7 @@ const (
)

type ReplicatorDelegate interface {
Replicate(ctx context.Context, lastRemoteIndex uint64) (index uint64, exit bool, err error)
Replicate(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (index uint64, exit bool, err error)
}

type ReplicatorConfig struct {
@@ -39,16 +39,18 @@ type ReplicatorConfig struct {
// Maximum wait time between failing RPCs
MaxRetryWait time.Duration
// Where to send our logs
Logger *log.Logger
Logger hclog.Logger
// Function to use for determining if an error should be suppressed
SuppressErrorLog func(err error) bool
}

type Replicator struct {
name string
limiter *rate.Limiter
waiter *lib.RetryWaiter
delegate ReplicatorDelegate
logger *log.Logger
lastRemoteIndex uint64
}
type Replicator struct {
limiter *rate.Limiter
waiter *lib.RetryWaiter
delegate ReplicatorDelegate
logger hclog.Logger
lastRemoteIndex uint64
suppressErrorLog func(err error) bool
}

func NewReplicator(config *ReplicatorConfig) (*Replicator, error) {
@@ -59,7 +61,8 @@ func NewReplicator(config *ReplicatorConfig) (*Replicator, error) {
return nil, fmt.Errorf("Cannot create the Replicator without a Delegate set in the config")
}
if config.Logger == nil {
config.Logger = log.New(os.Stderr, "", log.LstdFlags)
logger := hclog.New(&hclog.LoggerOptions{})
config.Logger = logger
}
limiter := rate.NewLimiter(rate.Limit(config.Rate), config.Burst)

@@ -74,16 +77,16 @@ func NewReplicator(config *ReplicatorConfig) (*Replicator, error) {
}
waiter := lib.NewRetryWaiter(minFailures, 0*time.Second, maxWait, lib.NewJitterRandomStagger(10))
return &Replicator{
name: config.Name,
limiter: limiter,
waiter: waiter,
delegate: config.Delegate,
logger: config.Logger,
}, nil
return &Replicator{
limiter: limiter,
waiter: waiter,
delegate: config.Delegate,
logger: config.Logger.Named(logging.Replication).Named(config.Name),
suppressErrorLog: config.SuppressErrorLog,
}, nil
}

func (r *Replicator) Run(ctx context.Context) error {
defer r.logger.Printf("[INFO] replication: stopped %s replication", r.name)
defer r.logger.Info("stopped replication")

for {
// This ensures we aren't doing too many successful replication rounds - mostly useful when
@@ -94,7 +97,7 @@ func (r *Replicator) Run(ctx context.Context) error {
}

// Perform a single round of replication
index, exit, err := r.delegate.Replicate(ctx, atomic.LoadUint64(&r.lastRemoteIndex))
index, exit, err := r.delegate.Replicate(ctx, atomic.LoadUint64(&r.lastRemoteIndex), r.logger)
if exit {
// the replication function told us to exit
return nil
@@ -104,10 +107,13 @@ func (r *Replicator) Run(ctx context.Context) error {
// reset the lastRemoteIndex when there is an RPC failure. This should cause a full sync to be done during
// the next round of replication
atomic.StoreUint64(&r.lastRemoteIndex, 0)
r.logger.Printf("[WARN] replication: %s replication error (will retry if still leader): %v", r.name, err)
if r.suppressErrorLog != nil && !r.suppressErrorLog(err) {
r.logger.Warn("replication error (will retry if still leader)", "error", err)
}
} else {
atomic.StoreUint64(&r.lastRemoteIndex, index)
r.logger.Printf("[DEBUG] replication: %s replication completed through remote index %d", r.name, index)
r.logger.Debug("replication completed through remote index", "index", index)
}

select {
@@ -124,14 +130,14 @@ func (r *Replicator) Index() uint64 {
return atomic.LoadUint64(&r.lastRemoteIndex)
}

type ReplicatorFunc func(ctx context.Context, lastRemoteIndex uint64) (index uint64, exit bool, err error)
type ReplicatorFunc func(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (index uint64, exit bool, err error)

type FunctionReplicator struct {
ReplicateFn ReplicatorFunc
}

func (r *FunctionReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
func (r *FunctionReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
return r.ReplicateFn(ctx, lastRemoteIndex)
return r.ReplicateFn(ctx, lastRemoteIndex, logger)
}

type IndexReplicatorDiff struct {
@@ -168,10 +174,10 @@ type IndexReplicatorDelegate interface {

type IndexReplicator struct {
Delegate IndexReplicatorDelegate
Logger *log.Logger
Logger hclog.Logger
}

func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64, _ hclog.Logger) (uint64, bool, error) {
fetchStart := time.Now()
lenRemote, remote, remoteIndex, err := r.Delegate.FetchRemote(lastRemoteIndex)
metrics.MeasureSince([]string{"leader", "replication", r.Delegate.MetricName(), "fetch"}, fetchStart)
@@ -180,7 +186,9 @@ func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64)
return 0, false, fmt.Errorf("failed to retrieve %s: %v", r.Delegate.PluralNoun(), err)
}

r.Logger.Printf("[DEBUG] replication: finished fetching %s: %d", r.Delegate.PluralNoun(), lenRemote)
r.Logger.Debug("finished fetching remote objects",
"amount", lenRemote,
)

// Need to check if we should be stopping. This will be common as the fetching process is a blocking
// RPC which could have been hanging around for a long time and during that time leadership could
@@ -216,11 +224,17 @@ func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64)
// The lastRemoteIndex is not used when the entry exists either only in the local state or
// only in the remote state. In those situations we need to either delete it or create it.
if remoteIndex < lastRemoteIndex {
r.Logger.Printf("[WARN] replication: %[1]s replication remote index moved backwards (%d to %d), forcing a full %[1]s sync", r.Delegate.SingularNoun(), lastRemoteIndex, remoteIndex)
r.Logger.Warn("replication remote index moved backwards, forcing a full sync",
"from", lastRemoteIndex,
"to", remoteIndex,
)
lastRemoteIndex = 0
}

r.Logger.Printf("[DEBUG] replication: %s replication - local: %d, remote: %d", r.Delegate.SingularNoun(), lenLocal, lenRemote)
r.Logger.Debug("diffing replication state",
"local_amount", lenLocal,
"remote_amount", lenRemote,
)

// Calculate the changes required to bring the state into sync and then
// apply them.
@@ -229,10 +243,15 @@ func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64)
return 0, false, fmt.Errorf("failed to diff %s local and remote states: %v", r.Delegate.SingularNoun(), err)
}

r.Logger.Printf("[DEBUG] replication: %s replication - deletions: %d, updates: %d", r.Delegate.SingularNoun(), diff.NumDeletions, diff.NumUpdates)
r.Logger.Debug("diffed replication state",
"deletions", diff.NumDeletions,
"updates", diff.NumUpdates,
)

if diff.NumDeletions > 0 {
r.Logger.Printf("[DEBUG] replication: %s replication - performing %d deletions", r.Delegate.SingularNoun(), diff.NumDeletions)
r.Logger.Debug("performing deletions",
"deletions", diff.NumDeletions,
)

exit, err := r.Delegate.PerformDeletions(ctx, diff.Deletions)
if exit {
@@ -242,11 +261,13 @@ func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64)
if err != nil {
return 0, false, fmt.Errorf("failed to apply local %s deletions: %v", r.Delegate.SingularNoun(), err)
}
r.Logger.Printf("[DEBUG] replication: %s replication - finished deletions", r.Delegate.SingularNoun())
r.Logger.Debug("finished deletions")
}

if diff.NumUpdates > 0 {
r.Logger.Printf("[DEBUG] replication: %s replication - performing %d updates", r.Delegate.SingularNoun(), diff.NumUpdates)
r.Logger.Debug("performing updates",
"updates", diff.NumUpdates,
)

exit, err := r.Delegate.PerformUpdates(ctx, diff.Updates)
if exit {
@@ -256,7 +277,7 @@ func (r *IndexReplicator) Replicate(ctx context.Context, lastRemoteIndex uint64)
if err != nil {
return 0, false, fmt.Errorf("failed to apply local %s updates: %v", r.Delegate.SingularNoun(), err)
}
r.Logger.Printf("[DEBUG] replication: %s replication - finished updates", r.Delegate.SingularNoun())
r.Logger.Debug("finished updates")
}

// Return the index we got back from the remote side, since we've synced
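Two things change for the replicator: the config now carries an hclog.Logger that NewReplicator scopes with Named calls (which is why the per-replicator name and the "replication:" prefix disappear from the messages), and an optional SuppressErrorLog hook decides which errors are worth logging at all. A rough, self-contained sketch of both ideas; the subsystem names and the sentinel error are assumptions for illustration, not values from this diff:

package main

import (
	"errors"
	"os"

	"github.com/hashicorp/go-hclog"
)

// errRoutine stands in for an error the caller considers uninteresting;
// it mirrors the role of the new ReplicatorConfig.SuppressErrorLog hook.
var errRoutine = errors.New("routine condition")

func suppressErrorLog(err error) bool {
	return errors.Is(err, errRoutine)
}

func main() {
	base := hclog.New(&hclog.LoggerOptions{Output: os.Stderr, Level: hclog.Debug})

	// NewReplicator derives its logger the same way: base -> replication
	// subsystem -> the replicator's own name, so the name travels in the
	// logger instead of being formatted into every message.
	logger := base.Named("replication").Named("config-entry")

	for _, err := range []error{errRoutine, errors.New("rpc timeout")} {
		if suppressErrorLog(err) {
			continue // caller asked for this class of error to stay quiet
		}
		logger.Warn("replication error (will retry if still leader)", "error", err)
	}
}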
@@ -6,17 +6,18 @@ import (
"testing"

"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)

func TestReplicationRestart(t *testing.T) {
mgr := NewLeaderRoutineManager(testutil.TestLogger(t))
mgr := NewLeaderRoutineManager(testutil.Logger(t))

config := ReplicatorConfig{
Name: "mock",
Delegate: &FunctionReplicator{
ReplicateFn: func(ctx context.Context, lastRemoteIndex uint64) (uint64, bool, error) {
ReplicateFn: func(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
return 1, false, nil
},
},
@@ -86,12 +87,12 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(0)).Return(0, nil, uint64(0), fmt.Errorf("induced error"))

idx, done, err := replicator.Replicate(context.Background(), 0)
idx, done, err := replicator.Replicate(context.Background(), 0, nil)

require.Equal(t, uint64(0), idx)
require.False(t, done)
@@ -105,13 +106,13 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(1), nil)
delegate.On("FetchLocal").Return(0, nil, fmt.Errorf("induced error"))

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.False(t, done)
@@ -125,7 +126,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(1), nil)
@@ -133,7 +134,7 @@ func TestIndexReplicator(t *testing.T) {
// this also is verifying that when the remote index goes backwards then we reset the index to 0
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(0)).Return(&IndexReplicatorDiff{}, fmt.Errorf("induced error"))

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.False(t, done)
@@ -147,14 +148,14 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(4), nil)
delegate.On("FetchLocal").Return(1, nil, nil)
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(3)).Return(&IndexReplicatorDiff{}, nil)

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(4), idx)
require.False(t, done)
@@ -167,7 +168,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(4), nil)
@@ -175,7 +176,7 @@ func TestIndexReplicator(t *testing.T) {
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(3)).Return(&IndexReplicatorDiff{NumDeletions: 1}, nil)
delegate.On("PerformDeletions", nil).Return(false, fmt.Errorf("induced error"))

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.False(t, done)
@@ -189,7 +190,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(4), nil)
@@ -197,7 +198,7 @@ func TestIndexReplicator(t *testing.T) {
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(3)).Return(&IndexReplicatorDiff{NumDeletions: 1}, nil)
delegate.On("PerformDeletions", nil).Return(true, nil)

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.True(t, done)
@@ -210,7 +211,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(4), nil)
@@ -218,7 +219,7 @@ func TestIndexReplicator(t *testing.T) {
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(3)).Return(&IndexReplicatorDiff{NumUpdates: 1}, nil)
delegate.On("PerformUpdates", nil).Return(false, fmt.Errorf("induced error"))

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.False(t, done)
@@ -232,7 +233,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(1, nil, uint64(4), nil)
@@ -240,7 +241,7 @@ func TestIndexReplicator(t *testing.T) {
delegate.On("DiffRemoteAndLocalState", nil, nil, uint64(3)).Return(&IndexReplicatorDiff{NumUpdates: 1}, nil)
delegate.On("PerformUpdates", nil).Return(true, nil)

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(0), idx)
require.True(t, done)
@@ -253,7 +254,7 @@ func TestIndexReplicator(t *testing.T) {

replicator := IndexReplicator{
Delegate: delegate,
Logger: testutil.TestLogger(t),
Logger: testutil.Logger(t),
}

delegate.On("FetchRemote", uint64(3)).Return(3, "bcd", uint64(4), nil)
@@ -262,7 +263,7 @@ func TestIndexReplicator(t *testing.T) {
delegate.On("PerformDeletions", "a").Return(false, nil)
delegate.On("PerformUpdates", "bcd").Return(false, nil)

idx, done, err := replicator.Replicate(context.Background(), 3)
idx, done, err := replicator.Replicate(context.Background(), 3, nil)

require.Equal(t, uint64(4), idx)
require.False(t, done)
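The tests now pass the logger to Replicate explicitly; the IndexReplicator implementation above accepts it as `_ hclog.Logger` and ignores it, which is why these calls can hand it nil, while FunctionReplicator forwards whatever it receives. A small runnable sketch of the new function shape, with hypothetical values:

package main

import (
	"context"
	"os"

	"github.com/hashicorp/go-hclog"
)

// replicateFn mirrors the new ReplicatorFunc shape: the logger is an explicit
// argument instead of state the function has to close over.
type replicateFn func(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error)

func main() {
	fn := replicateFn(func(ctx context.Context, lastRemoteIndex uint64, logger hclog.Logger) (uint64, bool, error) {
		if logger != nil {
			logger.Debug("replication round", "last_remote_index", lastRemoteIndex)
		}
		return lastRemoteIndex + 1, false, nil
	})

	logger := hclog.New(&hclog.LoggerOptions{Output: os.Stderr, Level: hclog.Debug})

	// Production-style call with a real logger...
	fn(context.Background(), 3, logger)
	// ...and a test-style call with nil, as in the IndexReplicator tests above,
	// which is safe for implementations that ignore the argument.
	fn(context.Background(), 3, nil)
}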
@@ -15,6 +15,8 @@ import (
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/memberlist"
@@ -45,6 +47,10 @@ var (
ErrChunkingResubmit = errors.New("please resubmit call for rechunking")
)

func (s *Server) rpcLogger() hclog.Logger {
return s.loggers.Named(logging.RPC)
}

// listen is used to listen for incoming RPC connections
func (s *Server) listen(listener net.Listener) {
for {
@@ -54,7 +60,7 @@ func (s *Server) listen(listener net.Listener) {
if s.shutdown {
return
}
s.logger.Printf("[ERR] consul.rpc: failed to accept RPC conn: %v", err)
s.rpcLogger().Error("failed to accept RPC conn", "error", err)
continue
}

@@ -76,7 +82,10 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {
buf := make([]byte, 1)
if _, err := conn.Read(buf); err != nil {
if err != io.EOF {
s.logger.Printf("[ERR] consul.rpc: failed to read byte: %v %s", err, logConn(conn))
s.rpcLogger().Error("failed to read byte",
"conn", logConn(conn),
"error", err,
)
}
conn.Close()
return
@@ -85,7 +94,7 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {

// Enforce TLS if VerifyIncoming is set
if s.tlsConfigurator.VerifyIncomingRPC() && !isTLS && typ != pool.RPCTLS && typ != pool.RPCTLSInsecure {
s.logger.Printf("[WARN] consul.rpc: Non-TLS connection attempted with VerifyIncoming set %s", logConn(conn))
s.rpcLogger().Warn("Non-TLS connection attempted with VerifyIncoming set", "conn", logConn(conn))
conn.Close()
return
}
@@ -115,7 +124,10 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {

default:
if !s.handleEnterpriseRPCConn(typ, conn, isTLS) {
s.logger.Printf("[ERR] consul.rpc: unrecognized RPC byte: %v %s", typ, logConn(conn))
s.rpcLogger().Error("unrecognized RPC byte",
"byte", typ,
"conn", logConn(conn),
)
conn.Close()
}
}
@@ -132,7 +144,10 @@ func (s *Server) handleMultiplexV2(conn net.Conn) {
sub, err := server.Accept()
if err != nil {
if err != io.EOF {
s.logger.Printf("[ERR] consul.rpc: multiplex conn accept failed: %v %s", err, logConn(conn))
s.rpcLogger().Error("multiplex conn accept failed",
"conn", logConn(conn),
"error", err,
)
}
return
}
@@ -153,7 +168,10 @@ func (s *Server) handleConsulConn(conn net.Conn) {

if err := s.rpcServer.ServeRequest(rpcCodec); err != nil {
if err != io.EOF && !strings.Contains(err.Error(), "closed") {
s.logger.Printf("[ERR] consul.rpc: RPC error: %v %s", err, logConn(conn))
s.rpcLogger().Error("RPC error",
"conn", logConn(conn),
"error", err,
)
metrics.IncrCounter([]string{"rpc", "request_error"}, 1)
}
return
@@ -175,7 +193,10 @@ func (s *Server) handleInsecureConn(conn net.Conn) {

if err := s.insecureRPCServer.ServeRequest(rpcCodec); err != nil {
if err != io.EOF && !strings.Contains(err.Error(), "closed") {
s.logger.Printf("[ERR] consul.rpc: INSECURERPC error: %v %s", err, logConn(conn))
s.rpcLogger().Error("INSECURERPC error",
"conn", logConn(conn),
"error", err,
)
metrics.IncrCounter([]string{"rpc", "request_error"}, 1)
}
return
@@ -190,7 +211,10 @@ func (s *Server) handleSnapshotConn(conn net.Conn) {
go func() {
defer conn.Close()
if err := s.handleSnapshotRequest(conn); err != nil {
s.logger.Printf("[ERR] consul.rpc: Snapshot RPC error: %v %s", err, logConn(conn))
s.rpcLogger().Error("Snapshot RPC error",
"conn", logConn(conn),
"error", err,
)
}
}()
}
@@ -309,10 +333,13 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{
manager, server, ok := s.router.FindRoute(dc)
if !ok {
if s.router.HasDatacenter(dc) {
s.logger.Printf("[WARN] consul.rpc: RPC request to DC %q is currently failing as no server can be reached", dc)
s.rpcLogger().Warn("RPC request to DC is currently failing as no server can be reached", "datacenter", dc)
return structs.ErrDCNotAvailable
}
s.logger.Printf("[WARN] consul.rpc: RPC request for DC %q, no path found (method: %s)", dc, method)
s.rpcLogger().Warn("RPC request for DC is currently failing as no path was found",
"datacenter", dc,
"method", method,
)
return structs.ErrNoDCPath
}

@@ -320,7 +347,12 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{
[]metrics.Label{{Name: "datacenter", Value: dc}})
if err := s.connPool.RPC(dc, server.Addr, server.Version, method, server.UseTLS, args, reply); err != nil {
manager.NotifyFailedServer(server)
s.logger.Printf("[ERR] consul: RPC failed to server %s in DC %q: %v (method: %s)", server.Addr, dc, err, method)
s.rpcLogger().Error("RPC failed to server in DC",
"server", server.Addr,
"datacenter", dc,
"method", method,
"error", err,
)
return err
}

@@ -397,7 +429,7 @@ func (s *Server) raftApplyWithEncoder(t structs.MessageType, msg interface{}, en

// Warn if the command is very large
if n := len(buf); n > raftWarnSize {
s.logger.Printf("[WARN] consul: Attempting to apply large raft entry (%d bytes)", n)
s.rpcLogger().Warn("Attempting to apply large raft entry", "size_in_bytes", n)
}

var chunked bool
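rpcLogger() derives a per-subsystem logger from the server's logger store, so the hand-written "consul.rpc:" prefixes disappear and connection details become fields. A self-contained sketch of the Named-sub-logger idea; the root logger name and the sample messages are illustrative:

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Keep one root logger and derive per-subsystem loggers from it;
	// rpcLogger() does the equivalent of root.Named("rpc").
	root := hclog.New(&hclog.LoggerOptions{
		Name:   "consul.server", // illustrative name
		Output: os.Stderr,
		Level:  hclog.Debug,
	})
	rpcLogger := root.Named("rpc")

	// The subsystem name replaces the old "consul.rpc:" message prefix, and
	// the details that used to trail the message become structured fields.
	rpcLogger.Error("failed to accept RPC conn", "error", "connection reset by peer")
	rpcLogger.Warn("RPC request to DC is currently failing as no server can be reached", "datacenter", "dc2")
}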
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/rpc"
"os"
@@ -30,6 +29,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-hclog"
@@ -167,7 +167,8 @@ type Server struct {
fsm *fsm.FSM

// Logger uses the provided LogOutput
logger *log.Logger
logger hclog.InterceptLogger
loggers *loggerStore

// The raft instance is used among Consul nodes within the DC to protect
// operations that require strong consistency.
@@ -291,7 +292,7 @@ func NewServer(config *Config) (*Server, error) {

// NewServerLogger is used to construct a new Consul server from the
// configuration, potentially returning an error
func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tlsConfigurator *tlsutil.Configurator) (*Server, error) {
func NewServerLogger(config *Config, logger hclog.InterceptLogger, tokens *token.Store, tlsConfigurator *tlsutil.Configurator) (*Server, error) {
// Check the protocol version.
if err := config.CheckProtocolVersion(); err != nil {
return nil, err
@@ -311,8 +312,12 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tl
if config.LogOutput == nil {
config.LogOutput = os.Stderr
}

if logger == nil {
logger = log.New(config.LogOutput, "", log.LstdFlags)
logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{
Level: hclog.Debug,
Output: config.LogOutput,
})
}

// Check if TLS is enabled
@@ -351,6 +356,8 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tl
ForceTLS: config.VerifyOutgoing,
}

serverLogger := logger.NamedIntercept(logging.ConsulServer)
loggers := newLoggerStore(serverLogger)
// Create server.
s := &Server{
config: config,
@@ -358,10 +365,11 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tl
connPool: connPool,
eventChLAN: make(chan serf.Event, serfEventChSize),
eventChWAN: make(chan serf.Event, serfEventChSize),
logger: logger,
logger: serverLogger,
loggers: loggers,
leaveCh: make(chan struct{}),
reconcileCh: make(chan serf.Member, reconcileChSize),
router: router.NewRouter(logger, config.Datacenter),
router: router.NewRouter(serverLogger, config.Datacenter),
rpcServer: rpc.NewServer(),
insecureRPCServer: rpc.NewServer(),
tlsConfigurator: tlsConfigurator,
@@ -384,11 +392,11 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tl
s.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst))

configReplicatorConfig := ReplicatorConfig{
Name: "Config Entry",
Name: logging.ConfigEntry,
Delegate: &FunctionReplicator{ReplicateFn: s.replicateConfig},
Rate: s.config.ConfigReplicationRate,
Burst: s.config.ConfigReplicationBurst,
Logger: logger,
Logger: s.loggers.Named(logging.Replication).Named(logging.ConfigEntry),
}
s.configReplicator, err = NewReplicator(&configReplicatorConfig)
if err != nil {
@@ -462,7 +470,7 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store, tl
if serfBindPortWAN == 0 {
return nil, fmt.Errorf("Failed to get dynamic bind port for WAN Serf")
}
s.logger.Printf("[INFO] agent: Serf WAN TCP bound to port %d", serfBindPortWAN)
s.logger.Info("Serf WAN TCP bound", "port", serfBindPortWAN)
}
}

@@ -534,7 +542,7 @@ func (s *Server) trackAutoEncryptCARoots() {
for {
select {
case <-s.shutdownCh:
s.logger.Printf("[DEBUG] agent: shutting down trackAutoEncryptCARoots because shutdown")
s.logger.Debug("shutting down trackAutoEncryptCARoots because shutdown")
return
default:
}
@@ -543,7 +551,7 @@ func (s *Server) trackAutoEncryptCARoots() {
ws.Add(state.AbandonCh())
_, cas, err := state.CARoots(ws)
if err != nil {
s.logger.Printf("[DEBUG] agent: Failed to watch AutoEncrypt CARoot: %v", err)
s.logger.Error("Failed to watch AutoEncrypt CARoot", "error", err)
return
}
caPems := []string{}
@@ -551,7 +559,7 @@ func (s *Server) trackAutoEncryptCARoots() {
caPems = append(caPems, ca.RootCert)
}
if err := s.tlsConfigurator.UpdateAutoEncryptCA(caPems); err != nil {
s.logger.Printf("[DEBUG] agent: Failed to update AutoEncrypt CAPems: %v", err)
s.logger.Error("Failed to update AutoEncrypt CAPems", "error", err)
}
ws.Watch(nil)
}
@@ -563,14 +571,14 @@ func (s *Server) setupRaft() error {
defer func() {
if s.raft == nil && s.raftStore != nil {
if err := s.raftStore.Close(); err != nil {
s.logger.Printf("[ERR] consul: failed to close Raft store: %v", err)
s.logger.Error("failed to close Raft store", "error", err)
}
}
}()

// Create the FSM.
var err error
s.fsm, err = fsm.New(s.tombstoneGC, s.config.LogOutput)
s.fsm, err = fsm.New(s.tombstoneGC, s.logger)
if err != nil {
return err
}
@@ -580,28 +588,18 @@ func (s *Server) setupRaft() error {
serverAddressProvider = s.serverLookup
}

raftLogger := hclog.New(&hclog.LoggerOptions{
Name: "raft",
Level: hclog.LevelFromString(s.config.LogLevel),
Output: s.config.LogOutput,
TimeFormat: `2006/01/02 15:04:05`,
})

// Create a transport layer.
transConfig := &raft.NetworkTransportConfig{
Stream: s.raftLayer,
MaxPool: 3,
Timeout: 10 * time.Second,
ServerAddressProvider: serverAddressProvider,
Logger: raftLogger,
Logger: s.loggers.Named(logging.Raft),
}

trans := raft.NewNetworkTransportWithConfig(transConfig)
s.raftTransport = trans
s.config.RaftConfig.Logger = s.loggers.Named(logging.Raft)
// Make sure we set the LogOutput.
s.config.RaftConfig.LogOutput = s.config.LogOutput
s.config.RaftConfig.Logger = raftLogger

// Versions of the Raft protocol below 3 require the LocalID to match the network
// address of the transport.
@@ -670,10 +668,10 @@ func (s *Server) setupRaft() error {
if err := os.Remove(peersFile); err != nil {
return fmt.Errorf("failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
}
s.logger.Printf("[INFO] consul: deleted peers.json file (see peers.info for details)")
s.logger.Info("deleted peers.json file (see peers.info for details)")
}
} else if _, err := os.Stat(peersFile); err == nil {
s.logger.Printf("[INFO] consul: found peers.json file, recovering Raft configuration...")
s.logger.Info("found peers.json file, recovering Raft configuration...")

var configuration raft.Configuration
if s.config.RaftConfig.ProtocolVersion < 3 {
@@ -685,7 +683,7 @@ func (s *Server) setupRaft() error {
return fmt.Errorf("recovery failed to parse peers.json: %v", err)
}

tmpFsm, err := fsm.New(s.tombstoneGC, s.config.LogOutput)
tmpFsm, err := fsm.New(s.tombstoneGC, s.logger)
if err != nil {
return fmt.Errorf("recovery failed to make temp FSM: %v", err)
}
@@ -697,7 +695,7 @@ func (s *Server) setupRaft() error {
if err := os.Remove(peersFile); err != nil {
return fmt.Errorf("recovery failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
}
s.logger.Printf("[INFO] consul: deleted peers.json file after successful recovery")
s.logger.Info("deleted peers.json file after successful recovery")
}
}

@@ -803,7 +801,7 @@ func (s *Server) setupRPC() error {

// Shutdown is used to shutdown the server
func (s *Server) Shutdown() error {
s.logger.Printf("[INFO] consul: shutting down server")
s.logger.Info("shutting down server")
s.shutdownLock.Lock()
defer s.shutdownLock.Unlock()

@@ -826,7 +824,7 @@ func (s *Server) Shutdown() error {
if s.serfWAN != nil {
s.serfWAN.Shutdown()
if err := s.router.RemoveArea(types.AreaWAN); err != nil {
s.logger.Printf("[WARN] consul: error removing WAN area: %v", err)
s.logger.Warn("error removing WAN area", "error", err)
}
}
s.router.Shutdown()
@@ -836,7 +834,7 @@ func (s *Server) Shutdown() error {
s.raftLayer.Close()
future := s.raft.Shutdown()
if err := future.Error(); err != nil {
s.logger.Printf("[WARN] consul: error shutting down raft: %s", err)
s.logger.Warn("error shutting down raft", "error", err)
}
if s.raftStore != nil {
s.raftStore.Close()
@@ -861,12 +859,12 @@ func (s *Server) Shutdown() error {

// Leave is used to prepare for a graceful shutdown of the server
func (s *Server) Leave() error {
s.logger.Printf("[INFO] consul: server starting leave")
s.logger.Info("server starting leave")

// Check the number of known peers
numPeers, err := s.numPeers()
if err != nil {
s.logger.Printf("[ERR] consul: failed to check raft peers: %v", err)
s.logger.Error("failed to check raft peers", "error", err)
return err
}

@@ -886,12 +884,12 @@ func (s *Server) Leave() error {
if minRaftProtocol >= 2 && s.config.RaftConfig.ProtocolVersion >= 3 {
future := s.raft.RemoveServer(raft.ServerID(s.config.NodeID), 0, 0)
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to remove ourself as raft peer: %v", err)
s.logger.Error("failed to remove ourself as raft peer", "error", err)
}
} else {
future := s.raft.RemovePeer(addr)
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to remove ourself as raft peer: %v", err)
s.logger.Error("failed to remove ourself as raft peer", "error", err)
}
}
}
@@ -899,14 +897,14 @@ func (s *Server) Leave() error {
// Leave the WAN pool
if s.serfWAN != nil {
if err := s.serfWAN.Leave(); err != nil {
s.logger.Printf("[ERR] consul: failed to leave WAN Serf cluster: %v", err)
s.logger.Error("failed to leave WAN Serf cluster", "error", err)
}
}

// Leave the LAN pool
if s.serfLAN != nil {
if err := s.serfLAN.Leave(); err != nil {
s.logger.Printf("[ERR] consul: failed to leave LAN Serf cluster: %v", err)
s.logger.Error("failed to leave LAN Serf cluster", "error", err)
}
}

@@ -917,7 +915,7 @@ func (s *Server) Leave() error {
// to do this *after* we've left the LAN pool so that clients will know
// to shift onto another server if they perform a retry. We also wake up
// all queries in the RPC retry state.
s.logger.Printf("[INFO] consul: Waiting %s to drain RPC traffic", s.config.LeaveDrainTime)
s.logger.Info("Waiting to drain RPC traffic", "drain_time", s.config.LeaveDrainTime)
close(s.leaveCh)
time.Sleep(s.config.LeaveDrainTime)

@@ -934,7 +932,7 @@ func (s *Server) Leave() error {
// Get the latest configuration.
future := s.raft.GetConfiguration()
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to get raft configuration: %v", err)
s.logger.Error("failed to get raft configuration", "error", err)
break
}

@@ -964,7 +962,7 @@ func (s *Server) Leave() error {
// may not realize that it has been removed. Need to revisit this
// and the warning here.
if !left {
s.logger.Printf("[WARN] consul: failed to leave raft configuration gracefully, timeout")
s.logger.Warn("failed to leave raft configuration gracefully, timeout")
}
}

@@ -1165,7 +1163,7 @@ func (s *Server) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
}
defer func() {
if err := snap.Close(); err != nil {
s.logger.Printf("[ERR] consul: Failed to close snapshot: %v", err)
s.logger.Error("Failed to close snapshot", "error", err)
}
}()

@@ -1187,7 +1185,7 @@ func (s *Server) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io

// RegisterEndpoint is used to substitute an endpoint for testing.
func (s *Server) RegisterEndpoint(name string, handler interface{}) error {
s.logger.Printf("[WARN] consul: endpoint injected; this should only be used for testing")
s.logger.Warn("endpoint injected; this should only be used for testing")
return s.rpcServer.RegisterName(name, handler)
}
@@ -1,19 +1,23 @@
 package consul

+import (
+	"github.com/hashicorp/consul/logging"
+)
+
 func init() {
-	registerEndpoint(func(s *Server) interface{} { return &ACL{s} })
+	registerEndpoint(func(s *Server) interface{} { return &ACL{s, s.loggers.Named(logging.ACL)} })
 	registerEndpoint(func(s *Server) interface{} { return &Catalog{s} })
-	registerEndpoint(func(s *Server) interface{} { return NewCoordinate(s) })
+	registerEndpoint(func(s *Server) interface{} { return NewCoordinate(s, s.logger) })
 	registerEndpoint(func(s *Server) interface{} { return &ConfigEntry{s} })
-	registerEndpoint(func(s *Server) interface{} { return &ConnectCA{srv: s} })
+	registerEndpoint(func(s *Server) interface{} { return &ConnectCA{srv: s, logger: s.loggers.Named(logging.Connect)} })
 	registerEndpoint(func(s *Server) interface{} { return &DiscoveryChain{s} })
 	registerEndpoint(func(s *Server) interface{} { return &Health{s} })
-	registerEndpoint(func(s *Server) interface{} { return &Intention{s} })
+	registerEndpoint(func(s *Server) interface{} { return &Intention{s, s.loggers.Named(logging.Intentions)} })
-	registerEndpoint(func(s *Server) interface{} { return &Internal{s} })
+	registerEndpoint(func(s *Server) interface{} { return &Internal{s, s.loggers.Named(logging.Internal)} })
-	registerEndpoint(func(s *Server) interface{} { return &KVS{s} })
+	registerEndpoint(func(s *Server) interface{} { return &KVS{s, s.loggers.Named(logging.KV)} })
-	registerEndpoint(func(s *Server) interface{} { return &Operator{s} })
+	registerEndpoint(func(s *Server) interface{} { return &Operator{s, s.loggers.Named(logging.Operator)} })
-	registerEndpoint(func(s *Server) interface{} { return &PreparedQuery{s} })
+	registerEndpoint(func(s *Server) interface{} { return &PreparedQuery{s, s.loggers.Named(logging.PreparedQuery)} })
-	registerEndpoint(func(s *Server) interface{} { return &Session{s} })
+	registerEndpoint(func(s *Server) interface{} { return &Session{s, s.loggers.Named(logging.Session)} })
 	registerEndpoint(func(s *Server) interface{} { return &Status{s} })
-	registerEndpoint(func(s *Server) interface{} { return &Txn{s} })
+	registerEndpoint(func(s *Server) interface{} { return &Txn{s, s.loggers.Named(logging.Transaction)} })
 }
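Each RPC endpoint now receives a named sub-logger instead of formatting its own "[ERR] consul.<subsystem>:" prefixes. A minimal sketch of the idea outside of Consul (the root options and the "acl"/"rpc" names here are illustrative, not the real logging package constants):

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// One root logger for the whole process.
	root := hclog.New(&hclog.LoggerOptions{
		Name:   "consul",
		Level:  hclog.Debug,
		Output: os.Stderr,
	})

	// Derive per-subsystem loggers; the name becomes part of every line,
	// e.g. "consul.acl: token not found".
	aclLogger := root.Named("acl")
	rpcLogger := root.Named("rpc")

	aclLogger.Debug("token not found", "accessor_id", "abc123") // accessor_id value is made up
	rpcLogger.Warn("slow RPC", "method", "Catalog.Register", "elapsed", "2.5s")
}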
@@ -10,6 +10,8 @@ import (
 	"github.com/hashicorp/consul/agent/metadata"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
+	"github.com/hashicorp/consul/logging"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/raft"
 	"github.com/hashicorp/serf/serf"
 )

@@ -77,12 +79,19 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
 	} else {
 		conf.Tags["acls"] = string(structs.ACLModeDisabled)
 	}
-	if s.logger == nil {
-		conf.MemberlistConfig.LogOutput = s.config.LogOutput
-		conf.LogOutput = s.config.LogOutput
-	}
-	conf.MemberlistConfig.Logger = s.logger
-	conf.Logger = s.logger
+	// Wrap hclog in a standard logger wrapper for serf and memberlist
+	// We use the Intercept variant here to ensure that serf and memberlist logs
+	// can be streamed via the monitor endpoint
+	serfLogger := s.logger.
+		NamedIntercept(logging.Serf).
+		StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+	memberlistLogger := s.logger.
+		NamedIntercept(logging.Memberlist).
+		StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+
+	conf.MemberlistConfig.Logger = memberlistLogger
+	conf.Logger = serfLogger
 	conf.EventCh = ch
 	conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion]
 	conf.RejoinAfterLeave = s.config.RejoinAfterLeave
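Serf and memberlist still expect a stdlib *log.Logger, so the hclog logger is converted through hclog's standard-logger adapter; InferLevels parses the "[WARN]"/"[ERR]" prefixes those libraries emit and maps them onto hclog levels. A minimal sketch of the same bridge using the plain adapter (the NamedIntercept/StandardLoggerIntercept calls above are the InterceptLogger variants used so the monitor endpoint can tap the stream):

package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
)

// bridgeToStdlib returns a *log.Logger that forwards to an hclog sub-logger.
// InferLevels makes lines such as "[WARN] serf: ..." come out at the matching
// hclog level instead of defaulting to INFO.
func bridgeToStdlib(root hclog.Logger, name string) *log.Logger {
	return root.Named(name).StandardLogger(&hclog.StandardLoggerOptions{
		InferLevels: true,
	})
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug})
	serfLog := bridgeToStdlib(root, "serf")
	serfLog.Println("[WARN] serf: failed to reach node") // logged as WARN by hclog
}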
@ -147,7 +156,7 @@ func (s *Server) lanEventHandler() {
|
||||||
s.localMemberEvent(e.(serf.MemberEvent))
|
s.localMemberEvent(e.(serf.MemberEvent))
|
||||||
case serf.EventQuery: // Ignore
|
case serf.EventQuery: // Ignore
|
||||||
default:
|
default:
|
||||||
s.logger.Printf("[WARN] consul: Unhandled LAN Serf Event: %#v", e)
|
s.logger.Warn("Unhandled LAN Serf Event", "event", e)
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-s.shutdownCh:
|
case <-s.shutdownCh:
|
||||||
|
@ -189,7 +198,7 @@ func (s *Server) localEvent(event serf.UserEvent) {
|
||||||
|
|
||||||
switch name := event.Name; {
|
switch name := event.Name; {
|
||||||
case name == newLeaderEvent:
|
case name == newLeaderEvent:
|
||||||
s.logger.Printf("[INFO] consul: New leader elected: %s", event.Payload)
|
s.logger.Info("New leader elected", "payload", string(event.Payload))
|
||||||
|
|
||||||
// Trigger the callback
|
// Trigger the callback
|
||||||
if s.config.ServerUp != nil {
|
if s.config.ServerUp != nil {
|
||||||
|
@ -197,7 +206,7 @@ func (s *Server) localEvent(event serf.UserEvent) {
|
||||||
}
|
}
|
||||||
case isUserEvent(name):
|
case isUserEvent(name):
|
||||||
event.Name = rawUserEventName(name)
|
event.Name = rawUserEventName(name)
|
||||||
s.logger.Printf("[DEBUG] consul: User event: %s", event.Name)
|
s.logger.Debug("User event", "event", event.Name)
|
||||||
|
|
||||||
// Trigger the callback
|
// Trigger the callback
|
||||||
if s.config.UserEventHandler != nil {
|
if s.config.UserEventHandler != nil {
|
||||||
|
@ -205,7 +214,7 @@ func (s *Server) localEvent(event serf.UserEvent) {
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
if !s.handleEnterpriseUserEvents(event) {
|
if !s.handleEnterpriseUserEvents(event) {
|
||||||
s.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
|
s.logger.Warn("Unhandled local event", "event", event)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -217,7 +226,7 @@ func (s *Server) lanNodeJoin(me serf.MemberEvent) {
|
||||||
if !ok || serverMeta.Segment != "" {
|
if !ok || serverMeta.Segment != "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: Adding LAN server %s", serverMeta)
|
s.logger.Info("Adding LAN server", "server", serverMeta.String())
|
||||||
|
|
||||||
// Update server lookup
|
// Update server lookup
|
||||||
s.serverLookup.AddServer(serverMeta)
|
s.serverLookup.AddServer(serverMeta)
|
||||||
|
@ -239,11 +248,11 @@ func (s *Server) maybeBootstrap() {
|
||||||
// check that BootstrapCluster will do, so this is a good pre-filter.
|
// check that BootstrapCluster will do, so this is a good pre-filter.
|
||||||
index, err := s.raftStore.LastIndex()
|
index, err := s.raftStore.LastIndex()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] consul: Failed to read last raft index: %v", err)
|
s.logger.Error("Failed to read last raft index", "error", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if index != 0 {
|
if index != 0 {
|
||||||
s.logger.Printf("[INFO] consul: Raft data found, disabling bootstrap mode")
|
s.logger.Info("Raft data found, disabling bootstrap mode")
|
||||||
s.config.BootstrapExpect = 0
|
s.config.BootstrapExpect = 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -258,15 +267,15 @@ func (s *Server) maybeBootstrap() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if p.Datacenter != s.config.Datacenter {
|
if p.Datacenter != s.config.Datacenter {
|
||||||
s.logger.Printf("[ERR] consul: Member %v has a conflicting datacenter, ignoring", member)
|
s.logger.Warn("Member has a conflicting datacenter, ignoring", "member", member)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if p.Expect != 0 && p.Expect != s.config.BootstrapExpect {
|
if p.Expect != 0 && p.Expect != s.config.BootstrapExpect {
|
||||||
s.logger.Printf("[ERR] consul: Member %v has a conflicting expect value. All nodes should expect the same number.", member)
|
s.logger.Error("Member has a conflicting expect value. All nodes should expect the same number.", "member", member)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if p.Bootstrap {
|
if p.Bootstrap {
|
||||||
s.logger.Printf("[ERR] consul: Member %v has bootstrap mode. Expect disabled.", member)
|
s.logger.Error("Member has bootstrap mode. Expect disabled.", "member", member)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !p.NonVoter {
|
if !p.NonVoter {
|
||||||
|
@ -289,8 +298,11 @@ func (s *Server) maybeBootstrap() {
|
||||||
if err := s.connPool.RPC(s.config.Datacenter, server.Addr, server.Version,
|
if err := s.connPool.RPC(s.config.Datacenter, server.Addr, server.Version,
|
||||||
"Status.Peers", server.UseTLS, &structs.DCSpecificRequest{Datacenter: s.config.Datacenter}, &peers); err != nil {
|
"Status.Peers", server.UseTLS, &structs.DCSpecificRequest{Datacenter: s.config.Datacenter}, &peers); err != nil {
|
||||||
nextRetry := time.Duration((1 << attempt) * peerRetryBase)
|
nextRetry := time.Duration((1 << attempt) * peerRetryBase)
|
||||||
s.logger.Printf("[ERR] consul: Failed to confirm peer status for %s: %v. Retrying in "+
|
s.logger.Error("Failed to confirm peer status for server (will retry).",
|
||||||
"%v...", server.Name, err, nextRetry.String())
|
"server", server.Name,
|
||||||
|
"retry_interval", nextRetry.String(),
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
time.Sleep(nextRetry)
|
time.Sleep(nextRetry)
|
||||||
} else {
|
} else {
|
||||||
break
|
break
|
||||||
|
@ -309,7 +321,7 @@ func (s *Server) maybeBootstrap() {
|
||||||
// correctness because no server in the existing cluster will vote
|
// correctness because no server in the existing cluster will vote
|
||||||
// for this server, but it makes things much more stable.
|
// for this server, but it makes things much more stable.
|
||||||
if len(peers) > 0 {
|
if len(peers) > 0 {
|
||||||
s.logger.Printf("[INFO] consul: Existing Raft peers reported by %s, disabling bootstrap mode", server.Name)
|
s.logger.Info("Existing Raft peers reported by server, disabling bootstrap mode", "server", server.Name)
|
||||||
s.config.BootstrapExpect = 0
|
s.config.BootstrapExpect = 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -320,7 +332,7 @@ func (s *Server) maybeBootstrap() {
|
||||||
var addrs []string
|
var addrs []string
|
||||||
minRaftVersion, err := s.autopilot.MinRaftProtocol()
|
minRaftVersion, err := s.autopilot.MinRaftProtocol()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Printf("[ERR] consul: Failed to read server raft versions: %v", err)
|
s.logger.Error("Failed to read server raft versions", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, server := range servers {
|
for _, server := range servers {
|
||||||
|
@ -343,11 +355,12 @@ func (s *Server) maybeBootstrap() {
|
||||||
}
|
}
|
||||||
configuration.Servers = append(configuration.Servers, peer)
|
configuration.Servers = append(configuration.Servers, peer)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: Found expected number of peers, attempting bootstrap: %s",
|
s.logger.Info("Found expected number of peers, attempting bootstrap",
|
||||||
strings.Join(addrs, ","))
|
"peers", strings.Join(addrs, ","),
|
||||||
|
)
|
||||||
future := s.raft.BootstrapCluster(configuration)
|
future := s.raft.BootstrapCluster(configuration)
|
||||||
if err := future.Error(); err != nil {
|
if err := future.Error(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: Failed to bootstrap cluster: %v", err)
|
s.logger.Error("Failed to bootstrap cluster", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bootstrapping complete, or failed for some reason, don't enter this
|
// Bootstrapping complete, or failed for some reason, don't enter this
|
||||||
|
@ -362,7 +375,7 @@ func (s *Server) lanNodeFailed(me serf.MemberEvent) {
|
||||||
if !ok || serverMeta.Segment != "" {
|
if !ok || serverMeta.Segment != "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
s.logger.Printf("[INFO] consul: Removing LAN server %s", serverMeta)
|
s.logger.Info("Removing LAN server", "server", serverMeta.String())
|
||||||
|
|
||||||
// Update id to address map
|
// Update id to address map
|
||||||
s.serverLookup.RemoveServer(serverMeta)
|
s.serverLookup.RemoveServer(serverMeta)
|
||||||
|
|
|
@ -3,7 +3,6 @@ package consul
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -23,6 +22,7 @@ import (
|
||||||
"github.com/hashicorp/consul/testrpc"
|
"github.com/hashicorp/consul/testrpc"
|
||||||
"github.com/hashicorp/consul/tlsutil"
|
"github.com/hashicorp/consul/tlsutil"
|
||||||
"github.com/hashicorp/consul/types"
|
"github.com/hashicorp/consul/types"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
"github.com/hashicorp/go-uuid"
|
"github.com/hashicorp/go-uuid"
|
||||||
"golang.org/x/time/rate"
|
"golang.org/x/time/rate"
|
||||||
|
|
||||||
|
@@ -256,7 +256,11 @@ func newServer(c *Config) (*Server, error) {
 	if w == nil {
 		w = os.Stderr
 	}
-	logger := log.New(w, c.NodeName+" - ", log.LstdFlags|log.Lmicroseconds)
+	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
+		Name:   c.NodeName,
+		Level:  hclog.Debug,
+		Output: w,
+	})
 	tlsConf, err := tlsutil.NewConfigurator(c.ToTLSUtilConfig(), logger)
 	if err != nil {
 		return nil, err
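This constructor is where the PR's headline choice lands: the same hclog options can emit human-readable unstructured lines or one JSON object per line. A hedged sketch of that switch (JSONFormat is hclog's option; how Consul surfaces it in agent configuration is outside this diff, and the flag name below is illustrative):

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

// newLogger builds the process logger; jsonLogs would be driven by the
// agent's logging configuration.
func newLogger(nodeName string, jsonLogs bool) hclog.InterceptLogger {
	return hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:       nodeName,
		Level:      hclog.Debug,
		Output:     os.Stderr,
		JSONFormat: jsonLogs, // false: "[INFO] node-1: ...", true: JSON per line
	})
}

func main() {
	logger := newLogger("node-1", true)
	logger.Info("server starting leave", "num_peers", 3)
	// In JSON mode this prints something like:
	// {"@level":"info","@message":"server starting leave","@module":"node-1","num_peers":3}
}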
@@ -1177,9 +1181,9 @@ func TestServer_CALogging(t *testing.T) {

 	// Setup dummy logger to catch output
 	var buf bytes.Buffer
-	logger := log.New(&buf, "", log.LstdFlags)
+	logger := testutil.LoggerWithOutput(t, &buf)

-	c, err := tlsutil.NewConfigurator(conf1.ToTLSUtilConfig(), nil)
+	c, err := tlsutil.NewConfigurator(conf1.ToTLSUtilConfig(), logger)
 	require.NoError(t, err)
 	s1, err := NewServerLogger(conf1, logger, new(token.Store), c)
 	if err != nil {
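Tests that used to capture output through log.New(&buf, ...) now need an hclog logger whose output is redirected into the buffer; testutil.LoggerWithOutput wraps that up. The underlying pattern is just pointing LoggerOptions.Output at a bytes.Buffer, sketched here independently of the Consul test helper:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/hashicorp/go-hclog"
)

func main() {
	var buf bytes.Buffer

	// Logger that writes into an in-memory buffer so a test can assert on it.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "test",
		Level:  hclog.Debug,
		Output: &buf,
	})

	logger.Info("CA initialized", "provider", "consul")

	if strings.Contains(buf.String(), "CA initialized") {
		fmt.Println("log line captured")
	}
}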
@@ -8,13 +8,15 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-uuid"
 )

 // Session endpoint is used to manipulate sessions for KV
 type Session struct {
 	srv *Server
+	logger hclog.Logger
 }

 // Apply is used to apply a modifying request to the data store. This should
@ -106,12 +108,12 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
|
||||||
for {
|
for {
|
||||||
var err error
|
var err error
|
||||||
if args.Session.ID, err = uuid.GenerateUUID(); err != nil {
|
if args.Session.ID, err = uuid.GenerateUUID(); err != nil {
|
||||||
s.srv.logger.Printf("[ERR] consul.session: UUID generation failed: %v", err)
|
s.logger.Error("UUID generation failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, sess, err := state.SessionGet(nil, args.Session.ID, &args.Session.EnterpriseMeta)
|
_, sess, err := state.SessionGet(nil, args.Session.ID, &args.Session.EnterpriseMeta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.srv.logger.Printf("[ERR] consul.session: Session lookup failed: %v", err)
|
s.logger.Error("Session lookup failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if sess == nil {
|
if sess == nil {
|
||||||
|
@ -123,7 +125,7 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
|
||||||
// Apply the update
|
// Apply the update
|
||||||
resp, err := s.srv.raftApply(structs.SessionRequestType, args)
|
resp, err := s.srv.raftApply(structs.SessionRequestType, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.srv.logger.Printf("[ERR] consul.session: Apply failed: %v", err)
|
s.logger.Error("Apply failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -292,7 +294,7 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest,
|
||||||
// Reset the session TTL timer.
|
// Reset the session TTL timer.
|
||||||
reply.Sessions = structs.Sessions{session}
|
reply.Sessions = structs.Sessions{session}
|
||||||
if err := s.srv.resetSessionTimer(args.SessionID, session); err != nil {
|
if err := s.srv.resetSessionTimer(args.SessionID, session); err != nil {
|
||||||
s.srv.logger.Printf("[ERR] consul.session: Session renew failed: %v", err)
|
s.logger.Error("Session renew failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -106,14 +106,14 @@ func (s *Server) invalidateSession(id string, entMeta *structs.EnterpriseMeta) {
|
||||||
for attempt := uint(0); attempt < maxInvalidateAttempts; attempt++ {
|
for attempt := uint(0); attempt < maxInvalidateAttempts; attempt++ {
|
||||||
_, err := s.raftApply(structs.SessionRequestType, args)
|
_, err := s.raftApply(structs.SessionRequestType, args)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
s.logger.Printf("[DEBUG] consul.state: Session %s TTL expired", id)
|
s.logger.Debug("Session TTL expired", "session", id)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Printf("[ERR] consul.session: Invalidation failed: %v", err)
|
s.logger.Error("Invalidation failed", "error", err)
|
||||||
time.Sleep((1 << attempt) * invalidateRetryBase)
|
time.Sleep((1 << attempt) * invalidateRetryBase)
|
||||||
}
|
}
|
||||||
s.logger.Printf("[ERR] consul.session: maximum revoke attempts reached for session: %s", id)
|
s.logger.Error("maximum revoke attempts reached for session", "error", id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// clearSessionTimer is used to clear the session time for
|
// clearSessionTimer is used to clear the session time for
|
||||||
|
|
|
@ -163,7 +163,7 @@ func (s *Server) handleSnapshotRequest(conn net.Conn) error {
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err := snap.Close(); err != nil {
|
if err := snap.Close(); err != nil {
|
||||||
s.logger.Printf("[ERR] consul: Failed to close snapshot: %v", err)
|
s.logger.Error("Failed to close snapshot", "error", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|
|
@@ -2,12 +2,12 @@ package consul

 import (
 	"context"
-	"log"
 	"sync"

 	"github.com/hashicorp/consul/agent/consul/autopilot"
 	"github.com/hashicorp/consul/agent/metadata"
 	"github.com/hashicorp/consul/agent/pool"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/serf/serf"
 )

@@ -19,7 +19,7 @@ import (
 // a single in-flight RPC to any given server, so goroutines don't accumulate
 // as we run the health check fairly frequently.
 type StatsFetcher struct {
-	logger *log.Logger
+	logger hclog.Logger
 	pool *pool.ConnPool
 	datacenter string
 	inflight map[string]struct{}

@@ -27,7 +27,7 @@ type StatsFetcher struct {
 }

 // NewStatsFetcher returns a stats fetcher.
-func NewStatsFetcher(logger *log.Logger, pool *pool.ConnPool, datacenter string) *StatsFetcher {
+func NewStatsFetcher(logger hclog.Logger, pool *pool.ConnPool, datacenter string) *StatsFetcher {
 	return &StatsFetcher{
 		logger: logger,
 		pool: pool,
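Because the fetcher now holds an hclog.Logger rather than a *log.Logger, fields that used to be baked into every format string (for example the datacenter) can be bound once with With and then appear on every subsequent line. A hedged sketch of that pattern, with illustrative names rather than the real wiring:

package main

import (
	"github.com/hashicorp/go-hclog"
)

type statsFetcher struct {
	logger hclog.Logger
}

func newStatsFetcher(logger hclog.Logger, datacenter string) *statsFetcher {
	return &statsFetcher{
		// Bind the datacenter once; every log call below carries it automatically.
		logger: logger.With("datacenter", datacenter),
	}
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "consul.autopilot"})
	f := newStatsFetcher(root, "dc1")
	f.logger.Warn("error getting server health from server",
		"server", "node-2",
		"error", "last request still outstanding",
	)
}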
@ -45,8 +45,10 @@ func (f *StatsFetcher) fetch(server *metadata.Server, replyCh chan *autopilot.Se
|
||||||
var reply autopilot.ServerStats
|
var reply autopilot.ServerStats
|
||||||
err := f.pool.RPC(f.datacenter, server.Addr, server.Version, "Status.RaftStats", server.UseTLS, &args, &reply)
|
err := f.pool.RPC(f.datacenter, server.Addr, server.Version, "Status.RaftStats", server.UseTLS, &args, &reply)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.logger.Printf("[WARN] consul: error getting server health from %q: %v",
|
f.logger.Warn("error getting server health from server",
|
||||||
server.Name, err)
|
"server", server.Name,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
replyCh <- &reply
|
replyCh <- &reply
|
||||||
}
|
}
|
||||||
|
@ -74,8 +76,10 @@ func (f *StatsFetcher) Fetch(ctx context.Context, members []serf.Member) map[str
|
||||||
f.inflightLock.Lock()
|
f.inflightLock.Lock()
|
||||||
for _, server := range servers {
|
for _, server := range servers {
|
||||||
if _, ok := f.inflight[server.ID]; ok {
|
if _, ok := f.inflight[server.ID]; ok {
|
||||||
f.logger.Printf("[WARN] consul: error getting server health from %q: last request still outstanding",
|
f.logger.Warn("error getting server health from server",
|
||||||
server.Name)
|
"server", server.Name,
|
||||||
|
"error", "last request still outstanding",
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
workItem := &workItem{
|
workItem := &workItem{
|
||||||
server: server,
|
server: server,
|
||||||
|
@ -105,8 +109,10 @@ func (f *StatsFetcher) Fetch(ctx context.Context, members []serf.Member) map[str
|
||||||
replies[workItem.server.ID] = reply
|
replies[workItem.server.ID] = reply
|
||||||
|
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
f.logger.Printf("[WARN] consul: error getting server health from %q: %v",
|
f.logger.Warn("error getting server health from server",
|
||||||
workItem.server.Name, ctx.Err())
|
"server", workItem.server.Name,
|
||||||
|
"error", ctx.Err(),
|
||||||
|
)
|
||||||
|
|
||||||
f.inflightLock.Lock()
|
f.inflightLock.Lock()
|
||||||
delete(f.inflight, workItem.server.ID)
|
delete(f.inflight, workItem.server.ID)
|
||||||
|
|
|
@ -11,7 +11,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/testrpc"
|
"github.com/hashicorp/consul/testrpc"
|
||||||
"github.com/hashicorp/consul/tlsutil"
|
"github.com/hashicorp/consul/tlsutil"
|
||||||
"github.com/hashicorp/net-rpc-msgpackrpc"
|
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -8,11 +8,13 @@ import (
|
||||||
"github.com/hashicorp/consul/acl"
|
"github.com/hashicorp/consul/acl"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Txn endpoint is used to perform multi-object atomic transactions.
|
// Txn endpoint is used to perform multi-object atomic transactions.
|
||||||
type Txn struct {
|
type Txn struct {
|
||||||
srv *Server
|
srv *Server
|
||||||
|
logger hclog.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// preCheck is used to verify the incoming operations before any further
|
// preCheck is used to verify the incoming operations before any further
|
||||||
|
@ -24,7 +26,7 @@ func (t *Txn) preCheck(authorizer acl.Authorizer, ops structs.TxnOps) structs.Tx
|
||||||
for i, op := range ops {
|
for i, op := range ops {
|
||||||
switch {
|
switch {
|
||||||
case op.KV != nil:
|
case op.KV != nil:
|
||||||
ok, err := kvsPreApply(t.srv, authorizer, op.KV.Verb, &op.KV.DirEnt)
|
ok, err := kvsPreApply(t.logger, t.srv, authorizer, op.KV.Verb, &op.KV.DirEnt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errors = append(errors, &structs.TxnError{
|
errors = append(errors, &structs.TxnError{
|
||||||
OpIndex: i,
|
OpIndex: i,
|
||||||
|
@ -122,7 +124,7 @@ func (t *Txn) Apply(args *structs.TxnRequest, reply *structs.TxnResponse) error
|
||||||
// Apply the update.
|
// Apply the update.
|
||||||
resp, err := t.srv.raftApply(structs.TxnRequestType, args)
|
resp, err := t.srv.raftApply(structs.TxnRequestType, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.srv.logger.Printf("[ERR] consul.txn: Apply failed: %v", err)
|
t.logger.Error("Raft apply failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if respErr, ok := resp.(error); ok {
|
if respErr, ok := resp.(error); ok {
|
||||||
|
|
agent/dns.go
@ -3,7 +3,6 @@ package agent
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
@ -21,6 +20,8 @@ import (
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
"github.com/hashicorp/consul/ipaddr"
|
"github.com/hashicorp/consul/ipaddr"
|
||||||
"github.com/hashicorp/consul/lib"
|
"github.com/hashicorp/consul/lib"
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
|
"github.com/hashicorp/go-hclog"
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -84,7 +85,7 @@ type DNSServer struct {
 	mux *dns.ServeMux
 	domain string
 	altDomain string
-	logger *log.Logger
+	logger hclog.Logger

 	// config stores the config as an atomic value (for hot-reloading). It is always of type *dnsConfig
 	config atomic.Value

@@ -103,7 +104,7 @@ func NewDNSServer(a *Agent) (*DNSServer, error) {
 		agent: a,
 		domain: domain,
 		altDomain: altDomain,
-		logger: a.logger,
+		logger: a.logger.Named(logging.DNS),
 	}
 	cfg, err := GetDNSConfig(a.config)
 	if err != nil {
@ -212,13 +213,13 @@ func (d *DNSServer) toggleRecursorHandlerFromConfig(cfg *dnsConfig) {
|
||||||
|
|
||||||
if shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 0, 1) {
|
if shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 0, 1) {
|
||||||
d.mux.HandleFunc(".", d.handleRecurse)
|
d.mux.HandleFunc(".", d.handleRecurse)
|
||||||
d.logger.Println("[DEBUG] dns: recursor enabled")
|
d.logger.Debug("recursor enabled")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 1, 0) {
|
if !shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 1, 0) {
|
||||||
d.mux.HandleRemove(".")
|
d.mux.HandleRemove(".")
|
||||||
d.logger.Println("[DEBUG] dns: recursor disabled")
|
d.logger.Debug("recursor disabled")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -301,9 +302,12 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 	defer func(s time.Time) {
 		metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
 			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
-		d.logger.Printf("[DEBUG] dns: request for %v (%v) from client %s (%s)",
-			q, time.Since(s), resp.RemoteAddr().String(),
-			resp.RemoteAddr().Network())
+		d.logger.Debug("request served from client",
+			"question", q,
+			"latency", time.Since(s).String(),
+			"client", resp.RemoteAddr().String(),
+			"client_network", resp.RemoteAddr().Network(),
+		)
 	}(time.Now())

 	cfg := d.config.Load().(*dnsConfig)
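The payoff of converting a format string like the one above is that the same call renders sensibly in both output modes. A hedged illustration of the two renderings (exact timestamps, field order, and prefixes will differ from what the agent actually prints):

package main

import (
	"os"
	"time"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Flip JSONFormat to switch between the two renderings of the same call.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "consul.dns",
		Level:      hclog.Debug,
		Output:     os.Stdout,
		JSONFormat: false,
	})

	logger.Debug("request served from client",
		"question", "consul.service.consul.", // illustrative query name
		"latency", (3 * time.Millisecond).String(),
		"client", "127.0.0.1:54021",
	)
	// Unstructured mode prints roughly:
	//   [DEBUG] consul.dns: request served from client: question=consul.service.consul. latency=3ms client=127.0.0.1:54021
	// JSON mode prints roughly:
	//   {"@level":"debug","@message":"request served from client","@module":"consul.dns","question":"consul.service.consul.",...}
}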
@ -390,7 +394,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
|
|
||||||
// Write out the complete response
|
// Write out the complete response
|
||||||
if err := resp.WriteMsg(m); err != nil {
|
if err := resp.WriteMsg(m); err != nil {
|
||||||
d.logger.Printf("[WARN] dns: failed to respond: %v", err)
|
d.logger.Warn("failed to respond", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -400,9 +404,14 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
defer func(s time.Time) {
|
defer func(s time.Time) {
|
||||||
metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
|
metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
|
||||||
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
|
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
|
||||||
d.logger.Printf("[DEBUG] dns: request for name %v type %v class %v (took %v) from client %s (%s)",
|
d.logger.Debug("request served from client",
|
||||||
q.Name, dns.Type(q.Qtype), dns.Class(q.Qclass), time.Since(s), resp.RemoteAddr().String(),
|
"name", q.Name,
|
||||||
resp.RemoteAddr().Network())
|
"type", dns.Type(q.Qtype),
|
||||||
|
"class", dns.Class(q.Qclass),
|
||||||
|
"latency", time.Since(s).String(),
|
||||||
|
"client", resp.RemoteAddr().String(),
|
||||||
|
"client_network", resp.RemoteAddr().Network(),
|
||||||
|
)
|
||||||
}(time.Now())
|
}(time.Now())
|
||||||
|
|
||||||
// Switch to TCP if the client is
|
// Switch to TCP if the client is
|
||||||
|
@ -447,7 +456,7 @@ func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
|
|
||||||
// Write out the complete response
|
// Write out the complete response
|
||||||
if err := resp.WriteMsg(m); err != nil {
|
if err := resp.WriteMsg(m); err != nil {
|
||||||
d.logger.Printf("[WARN] dns: failed to respond: %v", err)
|
d.logger.Warn("failed to respond", "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -481,12 +490,12 @@ func (d *DNSServer) addSOA(cfg *dnsConfig, msg *dns.Msg) {
|
||||||
func (d *DNSServer) nameservers(cfg *dnsConfig, edns bool, maxRecursionLevel int, req *dns.Msg) (ns []dns.RR, extra []dns.RR) {
|
func (d *DNSServer) nameservers(cfg *dnsConfig, edns bool, maxRecursionLevel int, req *dns.Msg) (ns []dns.RR, extra []dns.RR) {
|
||||||
out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", structs.DefaultEnterpriseMeta(), false, maxRecursionLevel)
|
out, err := d.lookupServiceNodes(cfg, d.agent.config.Datacenter, structs.ConsulServiceName, "", structs.DefaultEnterpriseMeta(), false, maxRecursionLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.logger.Printf("[WARN] dns: Unable to get list of servers: %s", err)
|
d.logger.Warn("Unable to get list of servers", "error", err)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(out.Nodes) == 0 {
|
if len(out.Nodes) == 0 {
|
||||||
d.logger.Printf("[WARN] dns: no servers found")
|
d.logger.Warn("no servers found")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -497,7 +506,7 @@ func (d *DNSServer) nameservers(cfg *dnsConfig, edns bool, maxRecursionLevel int
|
||||||
name, dc := o.Node.Node, o.Node.Datacenter
|
name, dc := o.Node.Node, o.Node.Datacenter
|
||||||
|
|
||||||
if InvalidDnsRe.MatchString(name) {
|
if InvalidDnsRe.MatchString(name) {
|
||||||
d.logger.Printf("[WARN] dns: Skipping invalid node %q for NS records", name)
|
d.logger.Warn("Skipping invalid node for NS records", "node", name)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -533,7 +542,7 @@ func (d *DNSServer) dispatch(network string, remoteAddr net.Addr, req, resp *dns
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *DNSServer) invalidQuery(req, resp *dns.Msg, cfg *dnsConfig, qName string) {
|
func (d *DNSServer) invalidQuery(req, resp *dns.Msg, cfg *dnsConfig, qName string) {
|
||||||
d.logger.Printf("[WARN] dns: QName invalid: %s", qName)
|
d.logger.Warn("QName invalid", "qname", qName)
|
||||||
d.addSOA(cfg, resp)
|
d.addSOA(cfg, resp)
|
||||||
resp.SetRcode(req, dns.RcodeNameError)
|
resp.SetRcode(req, dns.RcodeNameError)
|
||||||
}
|
}
|
||||||
|
@ -717,7 +726,7 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d
|
||||||
return
|
return
|
||||||
|
|
||||||
INVALID:
|
INVALID:
|
||||||
d.logger.Printf("[WARN] dns: QName invalid: %s", qName)
|
d.logger.Warn("QName invalid", "qname", qName)
|
||||||
d.addSOA(cfg, resp)
|
d.addSOA(cfg, resp)
|
||||||
resp.SetRcode(req, dns.RcodeNameError)
|
resp.SetRcode(req, dns.RcodeNameError)
|
||||||
return
|
return
|
||||||
|
@ -756,7 +765,7 @@ func (d *DNSServer) nodeLookup(cfg *dnsConfig, network, datacenter, node string,
|
||||||
}
|
}
|
||||||
out, err := d.lookupNode(cfg, args)
|
out, err := d.lookupNode(cfg, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.logger.Printf("[ERR] dns: rpc error: %v", err)
|
d.logger.Error("rpc error", "error", err)
|
||||||
resp.SetRcode(req, dns.RcodeServerFailure)
|
resp.SetRcode(req, dns.RcodeServerFailure)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -816,7 +825,7 @@ RPC:
|
||||||
if out.LastContact > cfg.MaxStale {
|
if out.LastContact > cfg.MaxStale {
|
||||||
args.AllowStale = false
|
args.AllowStale = false
|
||||||
useCache = false
|
useCache = false
|
||||||
d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
|
d.logger.Warn("Query results too stale, re-requesting")
|
||||||
goto RPC
|
goto RPC
|
||||||
} else if out.LastContact > staleCounterThreshold {
|
} else if out.LastContact > staleCounterThreshold {
|
||||||
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
||||||
|
@ -968,9 +977,11 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if truncated {
|
if truncated {
|
||||||
d.logger.Printf("[DEBUG] dns: TCP answer to %v too large truncated recs:=%d/%d, size:=%d/%d",
|
d.logger.Debug("TCP answer to question too large, truncated",
|
||||||
req.Question,
|
"question", req.Question,
|
||||||
len(resp.Answer), originalNumRecords, resp.Len(), originalSize)
|
"records", fmt.Sprintf("%d/%d", len(resp.Answer), originalNumRecords),
|
||||||
|
"size", fmt.Sprintf("%d/%d", resp.Len(), originalSize),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
return truncated
|
return truncated
|
||||||
}
|
}
|
||||||
|
@ -1082,7 +1093,10 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag
|
||||||
// This should never happen, but we want to protect against panics
|
// This should never happen, but we want to protect against panics
|
||||||
return out, fmt.Errorf("internal error: response type not correct")
|
return out, fmt.Errorf("internal error: response type not correct")
|
||||||
}
|
}
|
||||||
d.logger.Printf("[TRACE] dns: cache hit: %v for service %s", m.Hit, service)
|
d.logger.Trace("cache results for service",
|
||||||
|
"cache_hit", m.Hit,
|
||||||
|
"service", service,
|
||||||
|
)
|
||||||
|
|
||||||
out = *reply
|
out = *reply
|
||||||
} else {
|
} else {
|
||||||
|
@ -1098,7 +1112,7 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag
|
||||||
// redo the request the response was too stale
|
// redo the request the response was too stale
|
||||||
if args.AllowStale && out.LastContact > cfg.MaxStale {
|
if args.AllowStale && out.LastContact > cfg.MaxStale {
|
||||||
args.AllowStale = false
|
args.AllowStale = false
|
||||||
d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
|
d.logger.Warn("Query results too stale, re-requesting")
|
||||||
|
|
||||||
if err := d.agent.RPC("Health.ServiceNodes", &args, &out); err != nil {
|
if err := d.agent.RPC("Health.ServiceNodes", &args, &out); err != nil {
|
||||||
return structs.IndexedCheckServiceNodes{}, err
|
return structs.IndexedCheckServiceNodes{}, err
|
||||||
|
@ -1117,7 +1131,7 @@ func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, datacenter, service, tag
|
||||||
func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, req, resp *dns.Msg, maxRecursionLevel int) {
|
func (d *DNSServer) serviceLookup(cfg *dnsConfig, network, datacenter, service, tag string, entMeta *structs.EnterpriseMeta, connect bool, req, resp *dns.Msg, maxRecursionLevel int) {
|
||||||
out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, entMeta, connect, maxRecursionLevel)
|
out, err := d.lookupServiceNodes(cfg, datacenter, service, tag, entMeta, connect, maxRecursionLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.logger.Printf("[ERR] dns: rpc error: %v", err)
|
d.logger.Error("rpc error", "error", err)
|
||||||
resp.SetRcode(req, dns.RcodeServerFailure)
|
resp.SetRcode(req, dns.RcodeServerFailure)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -1238,7 +1252,10 @@ func (d *DNSServer) preparedQueryLookup(cfg *dnsConfig, network, datacenter, que
|
||||||
var err error
|
var err error
|
||||||
ttl, err = time.ParseDuration(out.DNS.TTL)
|
ttl, err = time.ParseDuration(out.DNS.TTL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.logger.Printf("[WARN] dns: Failed to parse TTL '%s' for prepared query '%s', ignoring", out.DNS.TTL, query)
|
d.logger.Warn("Failed to parse TTL for prepared query , ignoring",
|
||||||
|
"ttl", out.DNS.TTL,
|
||||||
|
"prepared_query", query,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ttl, _ = cfg.GetTTLForService(out.Service)
|
ttl, _ = cfg.GetTTLForService(out.Service)
|
||||||
|
@ -1283,7 +1300,10 @@ RPC:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.logger.Printf("[TRACE] dns: cache hit: %v for prepared query %s", m.Hit, args.QueryIDOrName)
|
d.logger.Trace("cache results for prepared query",
|
||||||
|
"cache_hit", m.Hit,
|
||||||
|
"prepared_query", args.QueryIDOrName,
|
||||||
|
)
|
||||||
|
|
||||||
out = *reply
|
out = *reply
|
||||||
} else {
|
} else {
|
||||||
|
@ -1296,7 +1316,7 @@ RPC:
|
||||||
if args.AllowStale {
|
if args.AllowStale {
|
||||||
if out.LastContact > cfg.MaxStale {
|
if out.LastContact > cfg.MaxStale {
|
||||||
args.AllowStale = false
|
args.AllowStale = false
|
||||||
d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
|
d.logger.Warn("Query results too stale, re-requesting")
|
||||||
goto RPC
|
goto RPC
|
||||||
} else if out.LastContact > staleCounterThreshold {
|
} else if out.LastContact > staleCounterThreshold {
|
||||||
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
||||||
|
@ -1714,9 +1734,13 @@ func (d *DNSServer) handleRecurse(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
q := req.Question[0]
|
q := req.Question[0]
|
||||||
network := "udp"
|
network := "udp"
|
||||||
defer func(s time.Time) {
|
defer func(s time.Time) {
|
||||||
d.logger.Printf("[DEBUG] dns: request for %v (%s) (%v) from client %s (%s)",
|
d.logger.Debug("request served from client",
|
||||||
q, network, time.Since(s), resp.RemoteAddr().String(),
|
"question", q,
|
||||||
resp.RemoteAddr().Network())
|
"network", network,
|
||||||
|
"latency", time.Since(s).String(),
|
||||||
|
"client", resp.RemoteAddr().String(),
|
||||||
|
"client_network", resp.RemoteAddr().Network(),
|
||||||
|
)
|
||||||
}(time.Now())
|
}(time.Now())
|
||||||
|
|
||||||
// Switch to TCP if the client is
|
// Switch to TCP if the client is
|
||||||
|
@ -1733,7 +1757,12 @@ func (d *DNSServer) handleRecurse(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
r, rtt, err = c.Exchange(req, recursor)
|
r, rtt, err = c.Exchange(req, recursor)
|
||||||
// Check if the response is valid and has the desired Response code
|
// Check if the response is valid and has the desired Response code
|
||||||
if r != nil && (r.Rcode != dns.RcodeSuccess && r.Rcode != dns.RcodeNameError) {
|
if r != nil && (r.Rcode != dns.RcodeSuccess && r.Rcode != dns.RcodeNameError) {
|
||||||
d.logger.Printf("[DEBUG] dns: recurse RTT for %v (%v) Recursor queried: %v Status returned: %v", q, rtt, recursor, dns.RcodeToString[r.Rcode])
|
d.logger.Debug("recurse failed for question",
|
||||||
|
"question", q,
|
||||||
|
"rtt", rtt,
|
||||||
|
"recursor", recursor,
|
||||||
|
"rcode", dns.RcodeToString[r.Rcode],
|
||||||
|
)
|
||||||
// If we still have recursors to forward the query to,
|
// If we still have recursors to forward the query to,
|
||||||
// we move forward onto the next one else the loop ends
|
// we move forward onto the next one else the loop ends
|
||||||
continue
|
continue
|
||||||
|
@ -1744,18 +1773,25 @@ func (d *DNSServer) handleRecurse(resp dns.ResponseWriter, req *dns.Msg) {
|
||||||
r.Compress = !cfg.DisableCompression
|
r.Compress = !cfg.DisableCompression
|
||||||
|
|
||||||
// Forward the response
|
// Forward the response
|
||||||
d.logger.Printf("[DEBUG] dns: recurse RTT for %v (%v) Recursor queried: %v", q, rtt, recursor)
|
d.logger.Debug("recurse succeeded for question",
|
||||||
|
"question", q,
|
||||||
|
"rtt", rtt,
|
||||||
|
"recursor", recursor,
|
||||||
|
)
|
||||||
if err := resp.WriteMsg(r); err != nil {
|
if err := resp.WriteMsg(r); err != nil {
|
||||||
d.logger.Printf("[WARN] dns: failed to respond: %v", err)
|
d.logger.Warn("failed to respond", "error", err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
d.logger.Printf("[ERR] dns: recurse failed: %v", err)
|
d.logger.Error("recurse failed", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If all resolvers fail, return a SERVFAIL message
|
// If all resolvers fail, return a SERVFAIL message
|
||||||
d.logger.Printf("[ERR] dns: all resolvers failed for %v from client %s (%s)",
|
d.logger.Error("all resolvers failed for question from client",
|
||||||
q, resp.RemoteAddr().String(), resp.RemoteAddr().Network())
|
"question", q,
|
||||||
|
"client", resp.RemoteAddr().String(),
|
||||||
|
"client_network", resp.RemoteAddr().Network(),
|
||||||
|
)
|
||||||
m := &dns.Msg{}
|
m := &dns.Msg{}
|
||||||
m.SetReply(req)
|
m.SetReply(req)
|
||||||
m.Compress = !cfg.DisableCompression
|
m.Compress = !cfg.DisableCompression
|
||||||
|
@ -1775,7 +1811,7 @@ func (d *DNSServer) resolveCNAME(cfg *dnsConfig, name string, maxRecursionLevel
|
||||||
|
|
||||||
if ln := strings.ToLower(name); strings.HasSuffix(ln, "."+d.domain) || strings.HasSuffix(ln, "."+d.altDomain) {
|
if ln := strings.ToLower(name); strings.HasSuffix(ln, "."+d.domain) || strings.HasSuffix(ln, "."+d.altDomain) {
|
||||||
if maxRecursionLevel < 1 {
|
if maxRecursionLevel < 1 {
|
||||||
d.logger.Printf("[ERR] dns: Infinite recursion detected for %s, won't perform any CNAME resolution.", name)
|
d.logger.Error("Infinite recursion detected for name, won't perform any CNAME resolution.", "name", name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
req := &dns.Msg{}
|
req := &dns.Msg{}
|
||||||
|
@ -1804,11 +1840,17 @@ func (d *DNSServer) resolveCNAME(cfg *dnsConfig, name string, maxRecursionLevel
|
||||||
for _, recursor := range cfg.Recursors {
|
for _, recursor := range cfg.Recursors {
|
||||||
r, rtt, err = c.Exchange(m, recursor)
|
r, rtt, err = c.Exchange(m, recursor)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
d.logger.Printf("[DEBUG] dns: cname recurse RTT for %v (%v)", name, rtt)
|
d.logger.Debug("cname recurse RTT for name",
|
||||||
|
"name", name,
|
||||||
|
"rtt", rtt,
|
||||||
|
)
|
||||||
return r.Answer
|
return r.Answer
|
||||||
}
|
}
|
||||||
d.logger.Printf("[ERR] dns: cname recurse failed for %v: %v", name, err)
|
d.logger.Error("cname recurse failed for name",
|
||||||
|
"name", name,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
d.logger.Printf("[ERR] dns: all resolvers failed for %v", name)
|
d.logger.Error("all resolvers failed for name", "name", name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -134,7 +134,7 @@ RUN_QUERY:
|
||||||
if authz.EventRead(name, nil) == acl.Allow {
|
if authz.EventRead(name, nil) == acl.Allow {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
s.agent.logger.Printf("[DEBUG] agent: dropping event %q from result due to ACLs", name)
|
s.agent.logger.Debug("dropping event from result due to ACLs", "event", name)
|
||||||
events = append(events[:i], events[i+1:]...)
|
events = append(events[:i], events[i+1:]...)
|
||||||
i--
|
i--
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,6 +26,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
"github.com/hashicorp/consul/lib"
|
"github.com/hashicorp/consul/lib"
|
||||||
|
"github.com/hashicorp/consul/logging"
|
||||||
"github.com/hashicorp/go-cleanhttp"
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
"github.com/mitchellh/mapstructure"
|
"github.com/mitchellh/mapstructure"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@@ -380,6 +381,7 @@ var (

 // wrap is used to wrap functions to make them more convenient
 func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
+	httpLogger := s.agent.logger.Named(logging.HTTP)
 	return func(resp http.ResponseWriter, req *http.Request) {
 		setHeaders(resp, s.agent.config.HTTPResponseHeaders)
 		setTranslateAddr(resp, s.agent.config.TranslateWANAddrs)
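With a named HTTP logger in scope, every request-level message carries the subsystem name plus the request metadata as fields instead of a hand-built format string. A standalone sketch of the same pattern around a plain net/http handler (the names and port are illustrative, not the agent's actual wiring):

package main

import (
	"net/http"
	"time"

	"github.com/hashicorp/go-hclog"
)

// withRequestLogging wraps a handler and emits one structured line per request.
func withRequestLogging(logger hclog.Logger, next http.Handler) http.Handler {
	httpLogger := logger.Named("http")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		httpLogger.Debug("Request finished",
			"method", r.Method,
			"url", r.URL.Path,
			"from", r.RemoteAddr,
			"latency", time.Since(start).String(),
		)
	})
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent", Level: hclog.Debug})
	if err := http.ListenAndServe(":8080", withRequestLogging(logger, http.NotFoundHandler())); err != nil {
		logger.Error("http server stopped", "error", err)
	}
}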
@ -387,7 +389,10 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
// Obfuscate any tokens from appearing in the logs
|
// Obfuscate any tokens from appearing in the logs
|
||||||
formVals, err := url.ParseQuery(req.URL.RawQuery)
|
formVals, err := url.ParseQuery(req.URL.RawQuery)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.agent.logger.Printf("[ERR] http: Failed to decode query: %s from=%s", err, req.RemoteAddr)
|
httpLogger.Error("Failed to decode query",
|
||||||
|
"from", req.RemoteAddr,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
resp.WriteHeader(http.StatusInternalServerError)
|
resp.WriteHeader(http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -405,7 +410,12 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
|
|
||||||
if s.blacklist.Block(req.URL.Path) {
|
if s.blacklist.Block(req.URL.Path) {
|
||||||
errMsg := "Endpoint is blocked by agent configuration"
|
errMsg := "Endpoint is blocked by agent configuration"
|
||||||
s.agent.logger.Printf("[ERR] http: Request %s %v, error: %v from=%s", req.Method, logURL, err, req.RemoteAddr)
|
httpLogger.Error("Request error",
|
||||||
|
"method", req.Method,
|
||||||
|
"url", logURL,
|
||||||
|
"from", req.RemoteAddr,
|
||||||
|
"error", errMsg,
|
||||||
|
)
|
||||||
resp.WriteHeader(http.StatusForbidden)
|
resp.WriteHeader(http.StatusForbidden)
|
||||||
fmt.Fprint(resp, errMsg)
|
fmt.Fprint(resp, errMsg)
|
||||||
return
|
return
|
||||||
|
@ -444,7 +454,12 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
}
|
}
|
||||||
|
|
||||||
handleErr := func(err error) {
|
handleErr := func(err error) {
|
||||||
s.agent.logger.Printf("[ERR] http: Request %s %v, error: %v from=%s", req.Method, logURL, err, req.RemoteAddr)
|
httpLogger.Error("Request error",
|
||||||
|
"method", req.Method,
|
||||||
|
"url", logURL,
|
||||||
|
"from", req.RemoteAddr,
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
switch {
|
switch {
|
||||||
case isForbidden(err):
|
case isForbidden(err):
|
||||||
resp.WriteHeader(http.StatusForbidden)
|
resp.WriteHeader(http.StatusForbidden)
|
||||||
|
@ -476,7 +491,12 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
defer func() {
|
defer func() {
|
||||||
s.agent.logger.Printf("[DEBUG] http: Request %s %v (%v) from=%s", req.Method, logURL, time.Since(start), req.RemoteAddr)
|
httpLogger.Debug("Request finished",
|
||||||
|
"method", req.Method,
|
||||||
|
"url", logURL,
|
||||||
|
"from", req.RemoteAddr,
|
||||||
|
"latency", time.Since(start).String(),
|
||||||
|
)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var obj interface{}
|
var obj interface{}
|
||||||
|
|
|
@ -12,8 +12,6 @@ import (
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/hashicorp/consul/testrpc"
|
"github.com/hashicorp/consul/testrpc"
|
||||||
|
|
||||||
"github.com/hashicorp/consul/logger"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// extra endpoints that should be tested, and their allowed methods
|
// extra endpoints that should be tested, and their allowed methods
|
||||||
|
@ -75,7 +73,6 @@ func TestHTTPAPI_MethodNotAllowed_OSS(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
a.Agent.LogWriter = logger.NewLogWriter(512)
|
|
||||||
defer a.Shutdown()
|
defer a.Shutdown()
|
||||||
// Use the master token here so the wait actually works.
|
// Use the master token here so the wait actually works.
|
||||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("sekrit"))
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("sekrit"))
|
||||||
|
@ -128,7 +125,6 @@ func TestHTTPAPI_MethodNotAllowed_OSS(t *testing.T) {
|
||||||
|
|
||||||
func TestHTTPAPI_OptionMethod_OSS(t *testing.T) {
|
func TestHTTPAPI_OptionMethod_OSS(t *testing.T) {
|
||||||
a := NewTestAgent(t, t.Name(), `acl_datacenter = "dc1"`)
|
a := NewTestAgent(t, t.Name(), `acl_datacenter = "dc1"`)
|
||||||
a.Agent.LogWriter = logger.NewLogWriter(512)
|
|
||||||
defer a.Shutdown()
|
defer a.Shutdown()
|
||||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
@ -170,7 +166,6 @@ func TestHTTPAPI_AllowedNets_OSS(t *testing.T) {
|
||||||
allow_write_http_from = ["127.0.0.1/8"]
|
allow_write_http_from = ["127.0.0.1/8"]
|
||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
a.Agent.LogWriter = logger.NewLogWriter(512)
|
|
||||||
defer a.Shutdown()
|
defer a.Shutdown()
|
||||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
|
|
@ -497,9 +497,9 @@ func TestHTTP_wrap_obfuscateLog(t *testing.T) {
|
||||||
resp := httptest.NewRecorder()
|
resp := httptest.NewRecorder()
|
||||||
req, _ := http.NewRequest("GET", url, nil)
|
req, _ := http.NewRequest("GET", url, nil)
|
||||||
a.srv.wrap(handler, []string{"GET"})(resp, req)
|
a.srv.wrap(handler, []string{"GET"})(resp, req)
|
||||||
|
bufout := buf.String()
|
||||||
if got := buf.String(); !strings.Contains(got, want) {
|
if !strings.Contains(bufout, want) {
|
||||||
t.Fatalf("got %s want %s", got, want)
|
t.Fatalf("got %s want %s", bufout, want)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@@ -2,7 +2,6 @@ package local

 import (
 	"fmt"
-	"log"
 	"reflect"
 	"strconv"
 	"strings"

@@ -18,6 +17,7 @@ import (
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/types"
+	"github.com/hashicorp/go-hclog"
 )

 const fullSyncReadMaxStale = 2 * time.Second

@@ -143,7 +143,7 @@ type State struct {
 	// created.
 	TriggerSyncChanges func()

-	logger *log.Logger
+	logger hclog.Logger

 	// Config is the agent config
 	config Config

@@ -178,10 +178,10 @@ type State struct {
 }

 // NewState creates a new local state for the agent.
-func NewState(c Config, lg *log.Logger, tokens *token.Store) *State {
+func NewState(c Config, logger hclog.Logger, tokens *token.Store) *State {
 	l := &State{
 		config: c,
-		logger: lg,
+		logger: logger,
 		services: make(map[structs.ServiceID]*ServiceState),
 		checks: make(map[structs.CheckID]*CheckState),
 		checkAliases: make(map[structs.ServiceID]map[structs.CheckID]chan<- struct{}),
@ -954,7 +954,7 @@ func (l *State) updateSyncState() error {
|
||||||
// The Serf check is created automatically and does not
|
// The Serf check is created automatically and does not
|
||||||
// need to be deregistered.
|
// need to be deregistered.
|
||||||
if id == structs.SerfCompoundCheckID {
|
if id == structs.SerfCompoundCheckID {
|
||||||
l.logger.Printf("[DEBUG] agent: Skipping remote check %q since it is managed automatically", structs.SerfCheckID)
|
l.logger.Debug("Skipping remote check since it is managed automatically", "check", structs.SerfCheckID)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1037,7 +1037,7 @@ func (l *State) SyncChanges() error {
|
||||||
case !s.InSync:
|
case !s.InSync:
|
||||||
err = l.syncService(id)
|
err = l.syncService(id)
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[DEBUG] agent: Service %q in sync", id.String())
|
l.logger.Debug("Service in sync", "service", id.String())
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -1058,7 +1058,7 @@ func (l *State) SyncChanges() error {
|
||||||
}
|
}
|
||||||
err = l.syncCheck(id)
|
err = l.syncCheck(id)
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[DEBUG] agent: Check %q in sync", id.String())
|
l.logger.Debug("Check in sync", "check", id.String())
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -1068,7 +1068,7 @@ func (l *State) SyncChanges() error {
|
||||||
// Now sync the node level info if we need to, and didn't do any of
|
// Now sync the node level info if we need to, and didn't do any of
|
||||||
// the other sync operations.
|
// the other sync operations.
|
||||||
if l.nodeInfoInSync {
|
if l.nodeInfoInSync {
|
||||||
l.logger.Printf("[DEBUG] agent: Node info in sync")
|
l.logger.Debug("Node info in sync")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return l.syncNodeInfo()
|
return l.syncNodeInfo()
|
||||||
|
@ -1100,7 +1100,7 @@ func (l *State) deleteService(key structs.ServiceID) error {
|
||||||
l.pruneCheck(c.Check.CompoundCheckID())
|
l.pruneCheck(c.Check.CompoundCheckID())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
l.logger.Printf("[INFO] agent: Deregistered service %q", key.ID)
|
l.logger.Info("Deregistered service", "service", key.ID)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
||||||
|
@ -1108,12 +1108,15 @@ func (l *State) deleteService(key structs.ServiceID) error {
|
||||||
// todo(fs): some backoff strategy might be a better solution
|
// todo(fs): some backoff strategy might be a better solution
|
||||||
l.services[key].InSync = true
|
l.services[key].InSync = true
|
||||||
accessorID := l.aclAccessorID(st)
|
accessorID := l.aclAccessorID(st)
|
||||||
l.logger.Printf("[DEBUG] agent: Service deregistration blocked by ACLs, service=%q accessorID=%v", key.String(), accessorID)
|
l.logger.Warn("Service deregistration blocked by ACLs", "service", key.String(), "accessorID", accessorID)
|
||||||
metrics.IncrCounter([]string{"acl", "blocked", "service", "deregistration"}, 1)
|
metrics.IncrCounter([]string{"acl", "blocked", "service", "deregistration"}, 1)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[WARN] agent: Deregistering service %q failed. %s", key, err)
|
l.logger.Warn("Deregistering service failed.",
|
||||||
|
"service", key.String(),
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1137,7 +1140,7 @@ func (l *State) deleteCheck(key structs.CheckID) error {
|
||||||
switch {
|
switch {
|
||||||
case err == nil || strings.Contains(err.Error(), "Unknown check"):
|
case err == nil || strings.Contains(err.Error(), "Unknown check"):
|
||||||
l.pruneCheck(key)
|
l.pruneCheck(key)
|
||||||
l.logger.Printf("[INFO] agent: Deregistered check %q", key.String())
|
l.logger.Info("Deregistered check", "check", key.String())
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
||||||
|
@ -1145,12 +1148,15 @@ func (l *State) deleteCheck(key structs.CheckID) error {
|
||||||
// todo(fs): some backoff strategy might be a better solution
|
// todo(fs): some backoff strategy might be a better solution
|
||||||
l.checks[key].InSync = true
|
l.checks[key].InSync = true
|
||||||
accessorID := l.aclAccessorID(ct)
|
accessorID := l.aclAccessorID(ct)
|
||||||
l.logger.Printf("[DEBUG] agent: Check deregistration blocked by ACLs, check=%q accessorID=%q", key.String(), accessorID)
|
l.logger.Warn("Check deregistration blocked by ACLs", "check", key.String(), "accessorID", accessorID)
|
||||||
metrics.IncrCounter([]string{"acl", "blocked", "check", "deregistration"}, 1)
|
metrics.IncrCounter([]string{"acl", "blocked", "check", "deregistration"}, 1)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[WARN] agent: Deregistering check %q failed. %s", key.String(), err)
|
l.logger.Warn("Deregistering check failed.",
|
||||||
|
"check", key.String(),
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1220,7 +1226,7 @@ func (l *State) syncService(key structs.ServiceID) error {
|
||||||
checkKey.Init(check.CheckID, &check.EnterpriseMeta)
|
checkKey.Init(check.CheckID, &check.EnterpriseMeta)
|
||||||
l.checks[checkKey].InSync = true
|
l.checks[checkKey].InSync = true
|
||||||
}
|
}
|
||||||
l.logger.Printf("[INFO] agent: Synced service %q", key.String())
|
l.logger.Info("Synced service", "service", key.String())
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
||||||
|
@ -1233,12 +1239,15 @@ func (l *State) syncService(key structs.ServiceID) error {
|
||||||
l.checks[checkKey].InSync = true
|
l.checks[checkKey].InSync = true
|
||||||
}
|
}
|
||||||
accessorID := l.aclAccessorID(st)
|
accessorID := l.aclAccessorID(st)
|
||||||
l.logger.Printf("[DEBUG] agent: Service registration blocked by ACLs, check=%q accessorID=%s", key.String(), accessorID)
|
l.logger.Warn("Service registration blocked by ACLs", "service", key.String(), "accessorID", accessorID)
|
||||||
metrics.IncrCounter([]string{"acl", "blocked", "service", "registration"}, 1)
|
metrics.IncrCounter([]string{"acl", "blocked", "service", "registration"}, 1)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[WARN] agent: Syncing service %q failed. %s", key.String(), err)
|
l.logger.Warn("Syncing service failed.",
|
||||||
|
"service", key.String(),
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1276,7 +1285,7 @@ func (l *State) syncCheck(key structs.CheckID) error {
|
||||||
// Given how the register API works, this info is also updated
|
// Given how the register API works, this info is also updated
|
||||||
// every time we sync a check.
|
// every time we sync a check.
|
||||||
l.nodeInfoInSync = true
|
l.nodeInfoInSync = true
|
||||||
l.logger.Printf("[INFO] agent: Synced check %q", key.String())
|
l.logger.Info("Synced check", "check", key.String())
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
||||||
|
@ -1284,12 +1293,15 @@ func (l *State) syncCheck(key structs.CheckID) error {
|
||||||
// todo(fs): some backoff strategy might be a better solution
|
// todo(fs): some backoff strategy might be a better solution
|
||||||
l.checks[key].InSync = true
|
l.checks[key].InSync = true
|
||||||
accessorID := l.aclAccessorID(ct)
|
accessorID := l.aclAccessorID(ct)
|
||||||
l.logger.Printf("[DEBUG] agent: Check registration blocked by ACLs, check=%q accessorID=%q", key, accessorID)
|
l.logger.Warn("Check registration blocked by ACLs", "check", key.String(), "accessorID", accessorID)
|
||||||
metrics.IncrCounter([]string{"acl", "blocked", "check", "registration"}, 1)
|
metrics.IncrCounter([]string{"acl", "blocked", "check", "registration"}, 1)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[WARN] agent: Syncing check %q failed. %s", key, err)
|
l.logger.Warn("Syncing check failed.",
|
||||||
|
"check", key.String(),
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1310,7 +1322,7 @@ func (l *State) syncNodeInfo() error {
|
||||||
switch {
|
switch {
|
||||||
case err == nil:
|
case err == nil:
|
||||||
l.nodeInfoInSync = true
|
l.nodeInfoInSync = true
|
||||||
l.logger.Printf("[INFO] agent: Synced node info")
|
l.logger.Info("Synced node info")
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
case acl.IsErrPermissionDenied(err), acl.IsErrNotFound(err):
|
||||||
|
@ -1318,12 +1330,12 @@ func (l *State) syncNodeInfo() error {
|
||||||
// todo(fs): some backoff strategy might be a better solution
|
// todo(fs): some backoff strategy might be a better solution
|
||||||
l.nodeInfoInSync = true
|
l.nodeInfoInSync = true
|
||||||
accessorID := l.aclAccessorID(at)
|
accessorID := l.aclAccessorID(at)
|
||||||
l.logger.Printf("[DEBUG] agent: Node info update blocked by ACLs, nodeID=%q accessorID=%q", l.config.NodeID, accessorID)
|
l.logger.Warn("Node info update blocked by ACLs", "node", l.config.NodeID, "accessorID", accessorID)
|
||||||
metrics.IncrCounter([]string{"acl", "blocked", "node", "registration"}, 1)
|
metrics.IncrCounter([]string{"acl", "blocked", "node", "registration"}, 1)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
l.logger.Printf("[WARN] agent: Syncing node info failed. %s", err)
|
l.logger.Warn("Syncing node info failed.", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1350,7 +1362,7 @@ func (l *State) notifyIfAliased(serviceID structs.ServiceID) {
|
||||||
func (l *State) aclAccessorID(secretID string) string {
|
func (l *State) aclAccessorID(secretID string) string {
|
||||||
_, ident, err := l.Delegate.ResolveIdentityFromToken(secretID)
|
_, ident, err := l.Delegate.ResolveIdentityFromToken(secretID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.logger.Printf("[DEBUG] agent.local: %v", err)
|
l.logger.Debug("error", err)
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
if ident == nil {
|
if ident == nil {
|
||||||
|
|
|
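For illustration only — a sketch, not part of this commit — the conversions above swap formatted Printf strings for hclog's message-plus-key/value style. Assuming a root logger built with go-hclog, the new call style looks roughly like this (the field values are placeholders):

package main

import "github.com/hashicorp/go-hclog"

func main() {
	// hclog pairs a fixed message with explicit key/value fields instead of
	// baking the level and values into one formatted string, so the same
	// call can later be rendered as plain text or JSON.
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent"})

	logger.Info("Synced service", "service", "web")
	logger.Warn("Service registration blocked by ACLs",
		"service", "web",
		"accessorID", "example-accessor-id", // placeholder value
	)
	logger.Debug("Node info in sync")
}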
@@ -3,12 +3,12 @@ package local_test
 import (
 "errors"
 "fmt"
-"log"
 "os"
 "testing"
 "time"

 "github.com/hashicorp/consul/testrpc"
+"github.com/hashicorp/go-hclog"

 "github.com/hashicorp/consul/agent"
 "github.com/hashicorp/consul/agent/config"
@@ -1966,9 +1966,12 @@ func checksInSync(state *local.State, wantChecks int) error {

 func TestState_Notify(t *testing.T) {
 t.Parallel()
+logger := hclog.New(&hclog.LoggerOptions{
+Output: os.Stderr,
+})

 state := local.NewState(local.Config{},
-log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
+logger, &token.Store{})

 // Stub state syncing
 state.TriggerSyncChanges = func() {}
@@ -1,16 +1,20 @@
 package local

 import (
-"log"
 "os"

 "github.com/hashicorp/consul/agent/token"
+"github.com/hashicorp/go-hclog"
 "github.com/mitchellh/go-testing-interface"
 )

 // TestState returns a configured *State for testing.
 func TestState(t testing.T) *State {
-result := NewState(Config{}, log.New(os.Stderr, "", log.LstdFlags), &token.Store{})
+logger := hclog.New(&hclog.LoggerOptions{
+Output: os.Stderr,
+})
+
+result := NewState(Config{}, logger, &token.Store{})
 result.TriggerSyncChanges = func() {}
 return result
 }
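For illustration only, not part of the diff: the TestState helper above builds a throwaway hclog logger that writes to stderr and injects it into the component under test. A generic version of that helper, with a made-up function name, might look like:

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

// newTestLogger is a hypothetical helper mirroring the pattern above:
// build a plain hclog logger aimed at stderr and hand it to whatever
// component is being tested.
func newTestLogger() hclog.Logger {
	return hclog.New(&hclog.LoggerOptions{
		Name:   "test",
		Level:  hclog.Debug,
		Output: os.Stderr,
	})
}

func main() {
	logger := newTestLogger()
	logger.Debug("test logger ready")
}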
@@ -2,12 +2,12 @@ package proxycfg

 import (
 "errors"
-"log"
 "sync"

 "github.com/hashicorp/consul/agent/cache"
 "github.com/hashicorp/consul/agent/local"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/go-hclog"
 )

 var (
@@ -65,7 +65,7 @@ type ManagerConfig struct {
 // for now and cleaner than passing the entire RuntimeConfig.
 Source *structs.QuerySource
 // logger is the agent's logger to be used for logging logs.
-Logger *log.Logger
+Logger hclog.Logger
 }

 // NewManager constructs a manager from the provided agent cache.
@@ -143,8 +143,10 @@ func (m *Manager) syncState() {
 // validate more generally that that is always true.
 err := m.ensureProxyServiceLocked(svc, m.State.ServiceToken(sid))
 if err != nil {
-m.Logger.Printf("[ERR] failed to watch proxy service %s: %s", sid.String(),
-err)
+m.Logger.Error("failed to watch proxy service",
+"service", sid.String(),
+"error", err,
+)
 }
 }

@@ -265,8 +267,9 @@ OUTER:
 default:
 // This should not be possible since we should be the only sender, enforced
 // by m.mu but error and drop the update rather than panic.
-m.Logger.Printf("[ERR] proxycfg: failed to deliver ConfigSnapshot to %q",
-snap.ProxyID.String())
+m.Logger.Error("failed to deliver ConfigSnapshot to proxy",
+"proxy", snap.ProxyID.String(),
+)
 }
 }

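For illustration only — a sketch under assumed names, not Consul's real types — the ManagerConfig change above shows the dependency-injection pattern the commit relies on: the caller builds one root logger and passes it down through a config struct field instead of each component owning a *log.Logger.

package main

import "github.com/hashicorp/go-hclog"

// ManagerConfig and Manager are stand-ins for the pattern above.
type ManagerConfig struct {
	Logger hclog.Logger
}

type Manager struct {
	logger hclog.Logger
}

func NewManager(cfg ManagerConfig) *Manager {
	return &Manager{logger: cfg.Logger}
}

func main() {
	root := hclog.New(&hclog.LoggerOptions{Name: "proxycfg"}) // assumed name
	m := NewManager(ManagerConfig{Logger: root})
	m.logger.Error("failed to deliver ConfigSnapshot to proxy", "proxy", "web-sidecar-proxy")
}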
@@ -1,8 +1,6 @@
 package proxycfg

 import (
-"log"
-"os"
 "path"
 "testing"
 "time"
@@ -17,6 +15,7 @@ import (
 "github.com/hashicorp/consul/agent/local"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/agent/token"
+"github.com/hashicorp/consul/sdk/testutil"
 )

 // assertLastReqArgs verifies that each request type had the correct source
@@ -312,8 +311,7 @@ func testManager_BasicLifecycle(
 c := TestCacheWithTypes(t, types)

 require := require.New(t)
-logger := log.New(os.Stderr, "", log.LstdFlags)
-
+logger := testutil.Logger(t)
 state := local.NewState(local.Config{}, logger, &token.Store{})
 source := &structs.QuerySource{
 Node: "node1",
@@ -450,7 +448,7 @@ func assertWatchChanRecvs(t *testing.T, ch <-chan *ConfigSnapshot, expect *Confi

 func TestManager_deliverLatest(t *testing.T) {
 // None of these need to do anything to test this method just be valid
-logger := log.New(os.Stderr, "", log.LstdFlags)
+logger := testutil.Logger(t)
 cfg := ManagerConfig{
 Cache: cache.New(nil),
 State: local.NewState(local.Config{}, logger, &token.Store{}),
@@ -4,7 +4,6 @@ import (
 "context"
 "errors"
 "fmt"
-"log"
 "reflect"
 "strings"
 "time"
@@ -12,6 +11,8 @@ import (
 "github.com/hashicorp/consul/agent/cache"
 cachetype "github.com/hashicorp/consul/agent/cache-types"
 "github.com/hashicorp/consul/agent/structs"
+"github.com/hashicorp/consul/logging"
+"github.com/hashicorp/go-hclog"
 "github.com/mitchellh/copystructure"
 "github.com/mitchellh/mapstructure"
 )
@@ -40,7 +41,7 @@ const (
 // is discarded and a new one created.
 type state struct {
 // logger, source and cache are required to be set before calling Watch.
-logger *log.Logger
+logger hclog.Logger
 source *structs.QuerySource
 cache CacheNotifier

@@ -252,8 +253,10 @@ func (s *state) initWatchesConnectProxy() error {
 // Don't hard fail on a config typo, just warn. We'll fall back on
 // the plain discovery chain if there is an error so it's safe to
 // continue.
-s.logger.Printf("[WARN] envoy: failed to parse Upstream[%s].Config: %s",
-u.Identifier(), err)
+s.logger.Warn("failed to parse upstream config",
+"upstream", u.Identifier(),
+"error", err,
+)
 }

 switch u.DestinationType {
@@ -357,7 +360,8 @@ func (s *state) initWatchesMeshGateway() error {
 }, serviceResolversWatchID, s.ch)

 if err != nil {
-s.logger.Printf("[ERR] mesh-gateway: failed to register watch for service-resolver config entries")
+s.logger.Named(logging.MeshGateway).
+Error("failed to register watch for service-resolver config entries", "error", err)
 return err
 }

@@ -421,7 +425,10 @@ func (s *state) run() {
 return
 case u := <-s.ch:
 if err := s.handleUpdate(u, &snap); err != nil {
-s.logger.Printf("[ERR] %s watch error: %s", u.CorrelationID, err)
+s.logger.Error("watch error",
+"id", u.CorrelationID,
+"error", err,
+)
 continue
 }

@@ -430,8 +437,10 @@ func (s *state) run() {
 // etc on future updates.
 snapCopy, err := snap.Clone()
 if err != nil {
-s.logger.Printf("[ERR] Failed to copy config snapshot for proxy %s",
-s.proxyID)
+s.logger.Error("Failed to copy config snapshot for proxy",
+"proxy", s.proxyID,
+"error", err,
+)
 continue
 }
 s.snapCh <- *snapCopy
@@ -452,8 +461,10 @@ func (s *state) run() {
 // etc on future updates.
 snapCopy, err := snap.Clone()
 if err != nil {
-s.logger.Printf("[ERR] Failed to copy config snapshot for proxy %s",
-s.proxyID)
+s.logger.Error("Failed to copy config snapshot for proxy",
+"proxy", s.proxyID,
+"error", err,
+)
 continue
 }
 replyCh <- snapCopy
@@ -620,7 +631,11 @@ func (s *state) resetWatchesFromChain(
 //
 // TODO(rb): content hash based add/remove
 for targetID, cancelFn := range snap.ConnectProxy.WatchedUpstreams[id] {
-s.logger.Printf("[TRACE] proxycfg: upstream=%q:chain=%q: stopping watch of target %s", id, chain.ServiceName, targetID)
+s.logger.Trace("stopping watch of target",
+"upstream", id,
+"chain", chain.ServiceName,
+"target", targetID,
+)
 delete(snap.ConnectProxy.WatchedUpstreams[id], targetID)
 delete(snap.ConnectProxy.WatchedUpstreamEndpoints[id], targetID)
 cancelFn()
@@ -628,7 +643,11 @@ func (s *state) resetWatchesFromChain(

 needGateways := make(map[string]struct{})
 for _, target := range chain.Targets {
-s.logger.Printf("[TRACE] proxycfg: upstream=%q:chain=%q: initializing watch of target %s", id, chain.ServiceName, target.ID)
+s.logger.Trace("initializing watch of target",
+"upstream", id,
+"chain", chain.ServiceName,
+"target", target.ID,
+)

 // We'll get endpoints from the gateway query, but the health still has
 // to come from the backing service query.
@@ -661,7 +680,11 @@ func (s *state) resetWatchesFromChain(
 continue
 }

-s.logger.Printf("[TRACE] proxycfg: upstream=%q:chain=%q: initializing watch of mesh gateway in dc %s", id, chain.ServiceName, dc)
+s.logger.Trace("initializing watch of mesh gateway in datacenter",
+"upstream", id,
+"chain", chain.ServiceName,
+"datacenter", dc,
+)

 ctx, cancel := context.WithCancel(s.ctx)
 err := s.watchMeshGateway(ctx, dc, id)
@@ -677,7 +700,11 @@ func (s *state) resetWatchesFromChain(
 if _, ok := needGateways[dc]; ok {
 continue
 }
-s.logger.Printf("[TRACE] proxycfg: upstream=%q:chain=%q: stopping watch of mesh gateway in dc %s", id, chain.ServiceName, dc)
+s.logger.Trace("stopping watch of mesh gateway in datacenter",
+"upstream", id,
+"chain", chain.ServiceName,
+"datacenter", dc,
+)
 delete(snap.ConnectProxy.WatchedGateways[id], dc)
 delete(snap.ConnectProxy.WatchedGatewayEndpoints[id], dc)
 cancelFn()
@@ -691,6 +718,8 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }

+meshLogger := s.logger.Named(logging.MeshGateway)
+
 switch u.CorrelationID {
 case rootsWatchID:
 roots, ok := u.Result.(*structs.IndexedCARoots)
@@ -718,7 +747,10 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
 }, fmt.Sprintf("connect-service:%s", sid.String()), s.ch)

 if err != nil {
-s.logger.Printf("[ERR] mesh-gateway: failed to register watch for connect-service:%s", sid.String())
+meshLogger.Error("failed to register watch for connect-service",
+"service", sid.String(),
+"error", err,
+)
 cancel()
 return err
 }
@@ -763,7 +795,10 @@ func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapsho
 }, fmt.Sprintf("mesh-gateway:%s", dc), s.ch)

 if err != nil {
-s.logger.Printf("[ERR] mesh-gateway: failed to register watch for mesh-gateway:%s", dc)
+meshLogger.Error("failed to register watch for mesh-gateway",
+"datacenter", dc,
+"error", err,
+)
 cancel()
 return err
 }
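For illustration only, not part of the diff: the mesh-gateway messages above are routed through a named sub-logger. In go-hclog, Named returns a child logger scoped under the parent's name. The sub-logger name "mesh_gateway" below is an assumption; the diff uses the constant logging.MeshGateway from Consul's logging package.

package main

import (
	"errors"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Build a root logger, then derive a subsystem-scoped child from it.
	root := hclog.New(&hclog.LoggerOptions{Name: "agent"})
	mesh := root.Named("mesh_gateway") // assumed name for the sketch

	mesh.Error("failed to register watch for connect-service",
		"service", "web",
		"error", errors.New("watch failed"), // real code passes the err value
	)
}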
@@ -622,7 +622,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 require.NotNil(t, state)

 // setup the test logger to use the t.Log
-state.logger = testutil.TestLogger(t)
+state.logger = testutil.Logger(t)

 // setup a new testing cache notifier
 cn := newTestCacheNotifier()
@@ -119,11 +119,11 @@ func (r *rexecWriter) Flush() {

 // handleRemoteExec is invoked when a new remote exec request is received
 func (a *Agent) handleRemoteExec(msg *UserEvent) {
-a.logger.Printf("[DEBUG] agent: received remote exec event (ID: %s)", msg.ID)
+a.logger.Debug("received remote exec event", "id", msg.ID)
 // Decode the event payload
 var event remoteExecEvent
 if err := json.Unmarshal(msg.Payload, &event); err != nil {
-a.logger.Printf("[ERR] agent: failed to decode remote exec event: %v", err)
+a.logger.Error("failed to decode remote exec event", "error", err)
 return
 }

@@ -147,7 +147,7 @@ func (a *Agent) handleRemoteExec(msg *UserEvent) {
 if len(spec.Script) != 0 {
 tmpFile, err := ioutil.TempFile("", "rexec")
 if err != nil {
-a.logger.Printf("[DEBUG] agent: failed to make tmp file: %v", err)
+a.logger.Debug("failed to make tmp file", "error", err)
 exitCode = 255
 return
 }
@@ -161,7 +161,7 @@ func (a *Agent) handleRemoteExec(msg *UserEvent) {
 }

 // Create the exec.Cmd
-a.logger.Printf("[INFO] agent: remote exec '%s'", script)
+a.logger.Info("remote exec script", "script", script)
 var cmd *osexec.Cmd
 var err error
 if len(spec.Args) > 0 {
@@ -170,7 +170,7 @@ func (a *Agent) handleRemoteExec(msg *UserEvent) {
 cmd, err = exec.Script(script)
 }
 if err != nil {
-a.logger.Printf("[DEBUG] agent: failed to start remote exec: %v", err)
+a.logger.Debug("failed to start remote exec", "error", err)
 exitCode = 255
 return
 }
@@ -187,7 +187,7 @@ func (a *Agent) handleRemoteExec(msg *UserEvent) {

 // Start execution
 if err := cmd.Start(); err != nil {
-a.logger.Printf("[DEBUG] agent: failed to start remote exec: %v", err)
+a.logger.Debug("failed to start remote exec", "error", err)
 exitCode = 255
 return
 }
@@ -254,22 +254,22 @@ func (a *Agent) remoteExecGetSpec(event *remoteExecEvent, spec *remoteExecSpec)
 var out structs.IndexedDirEntries
 QUERY:
 if err := a.RPC("KVS.Get", &get, &out); err != nil {
-a.logger.Printf("[ERR] agent: failed to get remote exec job: %v", err)
+a.logger.Error("failed to get remote exec job", "error", err)
 return false
 }
 if len(out.Entries) == 0 {
 // If the initial read was stale and had no data, retry as a consistent read
 if get.QueryOptions.AllowStale {
-a.logger.Printf("[DEBUG] agent: trying consistent fetch of remote exec job spec")
+a.logger.Debug("trying consistent fetch of remote exec job spec")
 get.QueryOptions.AllowStale = false
 goto QUERY
 } else {
-a.logger.Printf("[DEBUG] agent: remote exec aborted, job spec missing")
+a.logger.Debug("remote exec aborted, job spec missing")
 return false
 }
 }
 if err := json.Unmarshal(out.Entries[0].Value, &spec); err != nil {
-a.logger.Printf("[ERR] agent: failed to decode remote exec spec: %v", err)
+a.logger.Error("failed to decode remote exec spec", "error", err)
 return false
 }
 return true
@@ -279,7 +279,7 @@ QUERY:
 // continue.
 func (a *Agent) remoteExecWriteAck(event *remoteExecEvent) bool {
 if err := a.remoteExecWriteKey(event, remoteExecAckSuffix, nil); err != nil {
-a.logger.Printf("[ERR] agent: failed to ack remote exec job: %v", err)
+a.logger.Error("failed to ack remote exec job", "error", err)
 return false
 }
 return true
@@ -289,7 +289,7 @@ func (a *Agent) remoteExecWriteAck(event *remoteExecEvent) bool {
 func (a *Agent) remoteExecWriteOutput(event *remoteExecEvent, num int, output []byte) bool {
 suffix := path.Join(remoteExecOutputDivider, fmt.Sprintf("%05x", num))
 if err := a.remoteExecWriteKey(event, suffix, output); err != nil {
-a.logger.Printf("[ERR] agent: failed to write output for remote exec job: %v", err)
+a.logger.Error("failed to write output for remote exec job", "error", err)
 return false
 }
 return true
@@ -299,7 +299,7 @@ func (a *Agent) remoteExecWriteOutput(event *remoteExecEvent, num int, output []
 func (a *Agent) remoteExecWriteExitCode(event *remoteExecEvent, exitCode *int) bool {
 val := []byte(strconv.FormatInt(int64(*exitCode), 10))
 if err := a.remoteExecWriteKey(event, remoteExecExitSuffix, val); err != nil {
-a.logger.Printf("[ERR] agent: failed to write exit code for remote exec job: %v", err)
+a.logger.Error("failed to write exit code for remote exec job", "error", err)
 return false
 }
 return true
@@ -2,13 +2,13 @@ package agent

 import (
 "fmt"
-"log"
 "strings"
 "time"

 "github.com/hashicorp/consul/lib"
 discover "github.com/hashicorp/go-discover"
 discoverk8s "github.com/hashicorp/go-discover/provider/k8s"
+"github.com/hashicorp/go-hclog"
 )

 func (a *Agent) retryJoinLAN() {
@@ -18,7 +18,7 @@ func (a *Agent) retryJoinLAN() {
 maxAttempts: a.config.RetryJoinMaxAttemptsLAN,
 interval: a.config.RetryJoinIntervalLAN,
 join: a.JoinLAN,
-logger: a.logger,
+logger: a.logger.With("cluster", "LAN"),
 }
 if err := r.retryJoin(); err != nil {
 a.retryJoinCh <- err
@@ -32,7 +32,7 @@ func (a *Agent) retryJoinWAN() {
 maxAttempts: a.config.RetryJoinMaxAttemptsWAN,
 interval: a.config.RetryJoinIntervalWAN,
 join: a.JoinWAN,
-logger: a.logger,
+logger: a.logger.With("cluster", "WAN"),
 }
 if err := r.retryJoin(); err != nil {
 a.retryJoinCh <- err
@@ -52,7 +52,7 @@ func newDiscover() (*discover.Discover, error) {
 )
 }

-func retryJoinAddrs(disco *discover.Discover, cluster string, retryJoin []string, logger *log.Logger) []string {
+func retryJoinAddrs(disco *discover.Discover, cluster string, retryJoin []string, logger hclog.Logger) []string {
 addrs := []string{}
 if disco == nil {
 return addrs
@@ -60,15 +60,23 @@ func retryJoinAddrs(disco *discover.Discover, cluster string, retryJoin []string
 for _, addr := range retryJoin {
 switch {
 case strings.Contains(addr, "provider="):
-servers, err := disco.Addrs(addr, logger)
+servers, err := disco.Addrs(addr, logger.StandardLogger(&hclog.StandardLoggerOptions{
+InferLevels: true,
+}))
 if err != nil {
 if logger != nil {
-logger.Printf("[ERR] agent: Cannot discover %s %s: %s", cluster, addr, err)
+logger.Error("Cannot discover address",
+"address", addr,
+"error", err,
+)
 }
 } else {
 addrs = append(addrs, servers...)
 if logger != nil {
-logger.Printf("[INFO] agent: Discovered %s servers: %s", cluster, strings.Join(servers, " "))
+logger.Info("Discovered servers",
+"cluster", cluster,
+"servers", strings.Join(servers, " "),
+)
 }
 }

@@ -102,7 +110,7 @@ type retryJoiner struct {

 // logger is the agent logger. Log messages should contain the
 // "agent: " prefix.
-logger *log.Logger
+logger hclog.Logger
 }

 func (r *retryJoiner) retryJoin() error {
@@ -115,15 +123,17 @@ func (r *retryJoiner) retryJoin() error {
 return err
 }

-r.logger.Printf("[INFO] agent: Retry join %s is supported for: %s", r.cluster, strings.Join(disco.Names(), " "))
-r.logger.Printf("[INFO] agent: Joining %s cluster...", r.cluster)
+r.logger.Info("Retry join is supported for the following discovery methods",
+"discovery_methods", strings.Join(disco.Names(), " "),
+)
+r.logger.Info("Joining cluster...")
 attempt := 0
 for {
 addrs := retryJoinAddrs(disco, r.cluster, r.addrs, r.logger)
 if len(addrs) > 0 {
 n, err := r.join(addrs)
 if err == nil {
-r.logger.Printf("[INFO] agent: Join %s completed. Synced with %d initial agents", r.cluster, n)
+r.logger.Info("Join cluster completed. Synced with initial agents", "num_agents", n)
 return nil
 }
 } else if len(addrs) == 0 {
@@ -135,7 +145,10 @@ func (r *retryJoiner) retryJoin() error {
 return fmt.Errorf("agent: max join %s retry exhausted, exiting", r.cluster)
 }

-r.logger.Printf("[WARN] agent: Join %s failed: %v, retrying in %v", r.cluster, err, r.interval)
+r.logger.Warn("Join cluster failed, will retry",
+"retry_interval", r.interval,
+"error", err,
+)
 time.Sleep(r.interval)
 }
 }
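For illustration only, not part of the commit: the retry-join changes above use two hclog features worth calling out — With, which pins fields such as "cluster" to every later message, and StandardLogger, which bridges back to libraries (go-discover here) that still expect a *log.Logger. A minimal sketch:

package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent"})

	// With attaches key/value pairs to every subsequent message, as
	// retryJoinLAN/retryJoinWAN do above with the "cluster" key.
	lan := logger.With("cluster", "LAN")
	lan.Info("Joining cluster...")

	// StandardLogger adapts the hclog logger for APIs that take *log.Logger;
	// InferLevels parses "[WARN]"-style prefixes back into hclog levels.
	std := logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})
	std.Println("[WARN] this line is re-emitted at warn level")
}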
@@ -2,9 +2,9 @@ package agent

 import (
 "bytes"
-"log"
 "testing"

+"github.com/hashicorp/consul/sdk/testutil"
 "github.com/stretchr/testify/require"
 )

@@ -46,10 +46,13 @@ func TestAgentRetryJoinAddrs(t *testing.T) {
 for i, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 var buf bytes.Buffer
-logger := log.New(&buf, "logger: ", log.Lshortfile)
-require.Equal(t, test.expected, retryJoinAddrs(d, "LAN", test.input, logger), buf.String())
+logger := testutil.LoggerWithOutput(t, &buf)
+
+output := retryJoinAddrs(d, "LAN", test.input, logger)
+bufout := buf.String()
+require.Equal(t, test.expected, output, bufout)
 if i == 4 {
-require.Contains(t, buf.String(), `Using provider "aws"`)
+require.Contains(t, bufout, `Using provider "aws"`)
 }
 })
 }
@@ -6,7 +6,6 @@
 package router

 import (
-"log"
 "math/rand"
 "net"
 "sync"
@@ -15,6 +14,8 @@ import (

 "github.com/hashicorp/consul/agent/metadata"
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/logging"
+"github.com/hashicorp/go-hclog"
 )

 const (
@@ -85,7 +86,7 @@ type Manager struct {
 // shutdownCh is a copy of the channel in consul.Client
 shutdownCh chan struct{}

-logger *log.Logger
+logger hclog.Logger

 // clusterInfo is used to estimate the approximate number of nodes in
 // a cluster and limit the rate at which it rebalances server
@@ -206,7 +207,7 @@ func (m *Manager) FindServer() *metadata.Server {
 l := m.getServerList()
 numServers := len(l.servers)
 if numServers == 0 {
-m.logger.Printf("[WARN] manager: No servers available")
+m.logger.Warn("No servers available")
 return nil
 }

@@ -230,9 +231,13 @@ func (m *Manager) saveServerList(l serverList) {
 }

 // New is the only way to safely create a new Manager struct.
-func New(logger *log.Logger, shutdownCh chan struct{}, clusterInfo ManagerSerfCluster, connPoolPinger Pinger) (m *Manager) {
+func New(logger hclog.Logger, shutdownCh chan struct{}, clusterInfo ManagerSerfCluster, connPoolPinger Pinger) (m *Manager) {
+if logger == nil {
+logger = hclog.New(&hclog.LoggerOptions{})
+}
+
 m = new(Manager)
-m.logger = logger
+m.logger = logger.Named(logging.Manager)
 m.clusterInfo = clusterInfo // can't pass *consul.Client: import cycle
 m.connPoolPinger = connPoolPinger // can't pass *consul.ConnPool: import cycle
 m.rebalanceTimer = time.NewTimer(clientRPCMinReuseDuration)
@@ -270,7 +275,7 @@ func (m *Manager) NotifyFailedServer(s *metadata.Server) {
 if len(l.servers) > 1 && l.servers[0].Name == s.Name {
 l.servers = l.cycleServer()
 m.saveServerList(l)
-m.logger.Printf(`[DEBUG] manager: cycled away from server "%s"`, s.Name)
+m.logger.Debug("cycled away from server", "server", s.String())
 }
 }
 }
@@ -315,7 +320,10 @@ func (m *Manager) RebalanceServers() {
 foundHealthyServer = true
 break
 }
-m.logger.Printf(`[DEBUG] manager: pinging server "%s" failed: %s`, srv, err)
+m.logger.Debug("pinging server failed",
+"server", srv.String(),
+"error", err,
+)
 l.servers = l.cycleServer()
 }

@@ -325,13 +333,16 @@ func (m *Manager) RebalanceServers() {
 atomic.StoreInt32(&m.offline, 0)
 } else {
 atomic.StoreInt32(&m.offline, 1)
-m.logger.Printf("[DEBUG] manager: No healthy servers during rebalance, aborting")
+m.logger.Debug("No healthy servers during rebalance, aborting")
 return
 }

 // Verify that all servers are present
 if m.reconcileServerList(&l) {
-m.logger.Printf("[DEBUG] manager: Rebalanced %d servers, next active server is %s", len(l.servers), l.servers[0].String())
+m.logger.Debug("Rebalanced servers, new active server",
+"number_of_servers", len(l.servers),
+"active_server", l.servers[0].String(),
+)
 } else {
 // reconcileServerList failed because Serf removed the server
 // that was at the front of the list that had successfully
@@ -472,7 +483,7 @@ func (m *Manager) Start() {
 m.refreshServerRebalanceTimer()

 case <-m.shutdownCh:
-m.logger.Printf("[INFO] manager: shutting down")
+m.logger.Info("shutting down")
 return
 }
 }
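For illustration only — a sketch, not the commit's code — New above guards against a nil logger before scoping it with a named sub-logger. The helper and the name "manager" below are assumptions; the diff uses the logging.Manager constant.

package main

import "github.com/hashicorp/go-hclog"

// newManagerLogger is a hypothetical helper showing the guard used in New:
// accept a possibly nil logger, substitute a default, and scope it.
func newManagerLogger(logger hclog.Logger) hclog.Logger {
	if logger == nil {
		logger = hclog.New(&hclog.LoggerOptions{})
	}
	return logger.Named("manager")
}

func main() {
	l := newManagerLogger(nil)
	l.Warn("No servers available")
}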
@@ -3,28 +3,29 @@ package router
 import (
 "bytes"
 "fmt"
-"log"
 "math/rand"
 "net"
-"os"
 "testing"
 "time"

 "github.com/hashicorp/consul/agent/metadata"
+"github.com/hashicorp/go-hclog"
 )

 var (
-localLogger *log.Logger
 localLogBuffer *bytes.Buffer
 )

 func init() {
 localLogBuffer = new(bytes.Buffer)
-localLogger = log.New(localLogBuffer, "", 0)
 }

-func GetBufferedLogger() *log.Logger {
-return localLogger
+func GetBufferedLogger() hclog.Logger {
+localLogBuffer = new(bytes.Buffer)
+return hclog.New(&hclog.LoggerOptions{
+Level: 0,
+Output: localLogBuffer,
+})
 }

 type fauxConnPool struct {
@@ -58,7 +59,6 @@ func testManager() (m *Manager) {

 func testManagerFailProb(failPct float64) (m *Manager) {
 logger := GetBufferedLogger()
-logger = log.New(os.Stderr, "", log.LstdFlags)
 shutdownCh := make(chan struct{})
 m = New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct})
 return m
@@ -129,7 +129,6 @@ func TestManagerInternal_getServerList(t *testing.T) {
 }
 }

-// func New(logger *log.Logger, shutdownCh chan struct{}, clusterInfo ConsulClusterInfo) (m *Manager) {
 func TestManagerInternal_New(t *testing.T) {
 m := testManager()
 if m == nil {
@@ -296,7 +295,7 @@ func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
 {1000000, 19, 10 * time.Minute},
 }

-logger := log.New(os.Stderr, "", log.LstdFlags)
+logger := GetBufferedLogger()
 shutdownCh := make(chan struct{})

 for _, s := range clusters {
@@ -2,15 +2,14 @@ package router_test

 import (
 "fmt"
-"log"
 "math/rand"
 "net"
-"os"
 "strings"
 "testing"

 "github.com/hashicorp/consul/agent/metadata"
 "github.com/hashicorp/consul/agent/router"
+"github.com/hashicorp/consul/sdk/testutil"
 )

 type fauxAddr struct {
@@ -55,22 +54,22 @@ func (s *fauxSerf) NumNodes() int {
 return 16384
 }

-func testManager() (m *router.Manager) {
-logger := log.New(os.Stderr, "", log.LstdFlags)
+func testManager(t testing.TB) (m *router.Manager) {
+logger := testutil.Logger(t)
 shutdownCh := make(chan struct{})
 m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{})
 return m
 }

-func testManagerFailProb(failPct float64) (m *router.Manager) {
-logger := log.New(os.Stderr, "", log.LstdFlags)
+func testManagerFailProb(t testing.TB, failPct float64) (m *router.Manager) {
+logger := testutil.Logger(t)
 shutdownCh := make(chan struct{})
 m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct})
 return m
 }

-func testManagerFailAddr(failAddr net.Addr) (m *router.Manager) {
-logger := log.New(os.Stderr, "", log.LstdFlags)
+func testManagerFailAddr(t testing.TB, failAddr net.Addr) (m *router.Manager) {
+logger := testutil.Logger(t)
 shutdownCh := make(chan struct{})
 m = router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failAddr: failAddr})
 return m
@@ -78,7 +77,7 @@ func testManagerFailAddr(failAddr net.Addr) (m *router.Manager) {

 // func (m *Manager) AddServer(server *metadata.Server) {
 func TestServers_AddServer(t *testing.T) {
-m := testManager()
+m := testManager(t)
 var num int
 num = m.NumServers()
 if num != 0 {
@@ -108,7 +107,7 @@ func TestServers_AddServer(t *testing.T) {

 // func (m *Manager) IsOffline() bool {
 func TestServers_IsOffline(t *testing.T) {
-m := testManager()
+m := testManager(t)
 if !m.IsOffline() {
 t.Fatalf("bad")
 }
@@ -129,7 +128,7 @@ func TestServers_IsOffline(t *testing.T) {
 }

 const failPct = 0.5
-m = testManagerFailProb(failPct)
+m = testManagerFailProb(t, failPct)
 m.AddServer(s1)
 var on, off int
 for i := 0; i < 100; i++ {
@@ -147,7 +146,7 @@ func TestServers_IsOffline(t *testing.T) {

 // func (m *Manager) FindServer() (server *metadata.Server) {
 func TestServers_FindServer(t *testing.T) {
-m := testManager()
+m := testManager(t)

 if m.FindServer() != nil {
 t.Fatalf("Expected nil return")
@@ -193,9 +192,8 @@ func TestServers_FindServer(t *testing.T) {
 }
 }

-// func New(logger *log.Logger, shutdownCh chan struct{}) (m *Manager) {
 func TestServers_New(t *testing.T) {
-logger := log.New(os.Stderr, "", log.LstdFlags)
+logger := testutil.Logger(t)
 shutdownCh := make(chan struct{})
 m := router.New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{})
 if m == nil {
@@ -205,7 +203,7 @@ func TestServers_New(t *testing.T) {

 // func (m *Manager) NotifyFailedServer(server *metadata.Server) {
 func TestServers_NotifyFailedServer(t *testing.T) {
-m := testManager()
+m := testManager(t)

 if m.NumServers() != 0 {
 t.Fatalf("Expected zero servers to start")
@@ -258,7 +256,7 @@ func TestServers_NotifyFailedServer(t *testing.T) {

 // func (m *Manager) NumServers() (numServers int) {
 func TestServers_NumServers(t *testing.T) {
-m := testManager()
+m := testManager(t)
 var num int
 num = m.NumServers()
 if num != 0 {
@@ -276,7 +274,7 @@ func TestServers_NumServers(t *testing.T) {
 // func (m *Manager) RebalanceServers() {
 func TestServers_RebalanceServers(t *testing.T) {
 const failPct = 0.5
-m := testManagerFailProb(failPct)
+m := testManagerFailProb(t, failPct)
 const maxServers = 100
 const numShuffleTests = 100
 const uniquePassRate = 0.5
@@ -323,7 +321,7 @@ func TestServers_RebalanceServers_AvoidFailed(t *testing.T) {
 &metadata.Server{Name: "s3", Addr: &fauxAddr{"s3"}},
 }
 for i := 0; i < 100; i++ {
-m := testManagerFailAddr(&fauxAddr{"s2"})
+m := testManagerFailAddr(t, &fauxAddr{"s2"})
 for _, s := range servers {
 m.AddServer(s)
 }
@@ -338,7 +336,7 @@ func TestServers_RebalanceServers_AvoidFailed(t *testing.T) {
 // func (m *Manager) RemoveServer(server *metadata.Server) {
 func TestManager_RemoveServer(t *testing.T) {
 const nodeNameFmt = "s%02d"
-m := testManager()
+m := testManager(t)

 if m.NumServers() != 0 {
 t.Fatalf("Expected zero servers to start")
@@ -2,14 +2,15 @@ package router

 import (
 "fmt"
-"log"
 "sort"
 "sync"

 "github.com/hashicorp/consul/agent/metadata"
 "github.com/hashicorp/consul/agent/structs"
 "github.com/hashicorp/consul/lib"
+"github.com/hashicorp/consul/logging"
 "github.com/hashicorp/consul/types"
+"github.com/hashicorp/go-hclog"
 "github.com/hashicorp/serf/coordinate"
 "github.com/hashicorp/serf/serf"
 )
@@ -19,7 +20,7 @@ import (
 // healthy routes to servers by datacenter.
 type Router struct {
 // logger is used for diagnostic output.
-logger *log.Logger
+logger hclog.Logger

 // localDatacenter has the name of the router's home datacenter. This is
 // used to short-circuit RTT calculations for local servers.
@@ -82,9 +83,13 @@ type areaInfo struct {
 }

 // NewRouter returns a new Router with the given configuration.
-func NewRouter(logger *log.Logger, localDatacenter string) *Router {
+func NewRouter(logger hclog.Logger, localDatacenter string) *Router {
+if logger == nil {
+logger = hclog.New(&hclog.LoggerOptions{})
+}
+
 router := &Router{
-logger: logger,
+logger: logger.Named(logging.Router),
 localDatacenter: localDatacenter,
 areas: make(map[types.AreaID]*areaInfo),
 managers: make(map[string][]*Manager),
@@ -142,8 +147,10 @@ func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger
 for _, m := range cluster.Members() {
 ok, parts := metadata.IsConsulServer(m)
 if !ok {
-r.logger.Printf("[WARN]: consul: Non-server %q in server-only area %q",
-m.Name, areaID)
+r.logger.Warn("Non-server in server-only area",
+"non_server", m.Name,
+"area", areaID,
+)
 continue
 }

@@ -409,14 +416,18 @@ func (r *Router) GetDatacentersByDistance() ([]string, error) {
 for _, m := range info.cluster.Members() {
 ok, parts := metadata.IsConsulServer(m)
 if !ok {
-r.logger.Printf("[WARN]: consul: Non-server %q in server-only area %q",
-m.Name, areaID)
+r.logger.Warn("Non-server in server-only area",
+"non_server", m.Name,
+"area", areaID,
+)
 continue
 }

 if m.Status == serf.StatusLeft {
-r.logger.Printf("[DEBUG]: consul: server %q in area %q left, skipping",
-m.Name, areaID)
+r.logger.Debug("server in area left, skipping",
+"server", m.Name,
+"area", areaID,
+)
 continue
 }

|
|
||||||
|
@ -476,14 +487,18 @@ func (r *Router) GetDatacenterMaps() ([]structs.DatacenterMap, error) {
|
||||||
for _, m := range info.cluster.Members() {
|
for _, m := range info.cluster.Members() {
|
||||||
ok, parts := metadata.IsConsulServer(m)
|
ok, parts := metadata.IsConsulServer(m)
|
||||||
if !ok {
|
if !ok {
|
||||||
r.logger.Printf("[WARN]: consul: Non-server %q in server-only area %q",
|
r.logger.Warn("Non-server in server-only area",
|
||||||
m.Name, areaID)
|
"non_server", m.Name,
|
||||||
|
"area", areaID,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Status == serf.StatusLeft {
|
if m.Status == serf.StatusLeft {
|
||||||
r.logger.Printf("[DEBUG]: consul: server %q in area %q left, skipping",
|
r.logger.Debug("server in area left, skipping",
|
||||||
m.Name, areaID)
|
"server", m.Name,
|
||||||
|
"area", areaID,
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
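The NewRouter change above shows the pattern this commit applies to most constructors: accept an hclog.Logger, fall back to a default logger when the caller passes nil, and derive a named sub-logger for the component. A minimal sketch of that pattern, with an illustrative helper name and component string rather than the real logging.Router wiring:

package example

import "github.com/hashicorp/go-hclog"

// componentLogger shows the guard-and-name pattern: fall back to a default
// logger when the caller passes nil, then derive a named sub-logger so every
// line carries the component name. The component string is supplied by the
// caller here; the actual code uses constants from the logging package.
func componentLogger(logger hclog.Logger, component string) hclog.Logger {
	if logger == nil {
		logger = hclog.New(&hclog.LoggerOptions{})
	}
	return logger.Named(component)
}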
@@ -2,9 +2,7 @@ package router

 import (
 	"fmt"
-	"log"
 	"net"
-	"os"
 	"reflect"
 	"sort"
 	"strings"
@@ -13,6 +11,7 @@ import (

 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/serf/coordinate"
 	"github.com/hashicorp/serf/serf"
@@ -94,13 +93,13 @@ func testCluster(self string) *mockCluster {
 	return c
 }

-func testRouter(dc string) *Router {
-	logger := log.New(os.Stderr, "", log.LstdFlags)
+func testRouter(t testing.TB, dc string) *Router {
+	logger := testutil.Logger(t)
 	return NewRouter(logger, dc)
 }

 func TestRouter_Shutdown(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	// Create a WAN-looking area.
 	self := "node0.dc0"
@@ -136,7 +135,7 @@ func TestRouter_Shutdown(t *testing.T) {
 }

 func TestRouter_Routing(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	// Create a WAN-looking area.
 	self := "node0.dc0"
@@ -270,7 +269,7 @@ func TestRouter_Routing(t *testing.T) {
 }

 func TestRouter_Routing_Offline(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	// Create a WAN-looking area.
 	self := "node0.dc0"
@@ -350,7 +349,7 @@ func TestRouter_Routing_Offline(t *testing.T) {
 }

 func TestRouter_GetDatacenters(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	self := "node0.dc0"
 	wan := testCluster(self)
@@ -381,7 +380,7 @@ func TestRouter_distanceSorter(t *testing.T) {
 }

 func TestRouter_GetDatacentersByDistance(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	// Start with just the WAN area described in the diagram above.
 	self := "node0.dc0"
@@ -419,7 +418,7 @@ func TestRouter_GetDatacentersByDistance(t *testing.T) {
 }

 func TestRouter_GetDatacenterMaps(t *testing.T) {
-	r := testRouter("dc0")
+	r := testRouter(t, "dc0")

 	self := "node0.dc0"
 	wan := testCluster(self)

@@ -1,10 +1,9 @@
 package router

 import (
-	"log"
-
 	"github.com/hashicorp/consul/agent/metadata"
 	"github.com/hashicorp/consul/types"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/serf/serf"
 )

@@ -13,36 +12,45 @@ type routerFn func(types.AreaID, *metadata.Server) error

 // handleMemberEvents attempts to apply the given Serf member event to the given
 // router function.
-func handleMemberEvent(logger *log.Logger, fn routerFn, areaID types.AreaID, e serf.Event) {
+func handleMemberEvent(logger hclog.Logger, fn routerFn, areaID types.AreaID, e serf.Event) {
 	me, ok := e.(serf.MemberEvent)
 	if !ok {
-		logger.Printf("[ERR] consul: Bad event type %#v", e)
+		logger.Error("Bad event type", "event", e)
 		return
 	}

 	for _, m := range me.Members {
 		ok, parts := metadata.IsConsulServer(m)
 		if !ok {
-			logger.Printf("[WARN]: consul: Non-server %q in server-only area %q",
-				m.Name, areaID)
+			logger.Warn("Non-server in server-only area",
+				"non_server", m.Name,
+				"area", areaID,
+			)
 			continue
 		}

 		if err := fn(areaID, parts); err != nil {
-			logger.Printf("[ERR] consul: Failed to process %s event for server %q in area %q: %v",
-				me.Type.String(), m.Name, areaID, err)
+			logger.Error("Failed to process event for server in area",
+				"event", me.Type.String(),
+				"server", m.Name,
+				"area", areaID,
+				"error", err,
+			)
 			continue
 		}

-		logger.Printf("[INFO] consul: Handled %s event for server %q in area %q",
-			me.Type.String(), m.Name, areaID)
+		logger.Info("Handled event for server in area",
+			"event", me.Type.String(),
+			"server", m.Name,
+			"area", areaID,
+		)
 	}
 }

 // HandleSerfEvents is a long-running goroutine that pushes incoming events from
 // a Serf manager's channel into the given router. This will return when the
 // shutdown channel is closed.
-func HandleSerfEvents(logger *log.Logger, router *Router, areaID types.AreaID, shutdownCh <-chan struct{}, eventCh <-chan serf.Event) {
+func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, shutdownCh <-chan struct{}, eventCh <-chan serf.Event) {
 	for {
 		select {
 		case <-shutdownCh:
@@ -65,7 +73,7 @@ func HandleSerfEvents(logger *log.Logger, router *Router, areaID types.AreaID, s
 		case serf.EventQuery:

 		default:
-			logger.Printf("[WARN] consul: Unhandled Serf Event: %#v", e)
+			logger.Warn("Unhandled Serf Event", "event", e)
 		}
 	}
 }

@@ -2,11 +2,11 @@ package router

 import (
 	"fmt"
-	"log"
 	"net"
 	"strings"

 	"github.com/hashicorp/consul/agent/metadata"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/serf/serf"
 )

@@ -23,7 +23,7 @@ type FloodPortFn func(*metadata.Server) (int, bool)
 // local area are of the form <node> and those in the global area are of the
 // form <node>.<dc> as is done for WAN and general network areas in Consul
 // Enterprise.
-func FloodJoins(logger *log.Logger, addrFn FloodAddrFn, portFn FloodPortFn,
+func FloodJoins(logger hclog.Logger, addrFn FloodAddrFn, portFn FloodPortFn,
 	localDatacenter string, localSerf *serf.Serf, globalSerf *serf.Serf) {

 	// Names in the global Serf have the datacenter suffixed.
@@ -65,8 +65,11 @@ func FloodJoins(logger *log.Logger, addrFn FloodAddrFn, portFn FloodPortFn,
 		// get the host part.
 		addr, _, err := net.SplitHostPort(server.Addr.String())
 		if err != nil {
-			logger.Printf("[DEBUG] consul: Failed to flood-join %q (bad address %q): %v",
-				server.Name, server.Addr.String(), err)
+			logger.Debug("Failed to flood-join server (bad address)",
+				"server", server.Name,
+				"address", server.Addr.String(),
+				"error", err,
+			)
 		}
 		if addrFn != nil {
 			if a, ok := addrFn(server); ok {
@@ -86,18 +89,23 @@ func FloodJoins(logger *log.Logger, addrFn FloodAddrFn, portFn FloodPortFn,
 				addr = fmt.Sprintf("[%s]", addr)
 			}
 		} else {
-			logger.Printf("[DEBUG] consul: Failed to parse IP %s", addr)
+			logger.Debug("Failed to parse IP", "ip", addr)
 		}
 	}

 		// Do the join!
 		n, err := globalSerf.Join([]string{addr}, true)
 		if err != nil {
-			logger.Printf("[DEBUG] consul: Failed to flood-join %q at %s: %v",
-				server.Name, addr, err)
+			logger.Debug("Failed to flood-join server at address",
+				"server", server.Name,
+				"address", addr,
+				"error", err,
+			)
 		} else if n > 0 {
-			logger.Printf("[DEBUG] consul: Successfully performed flood-join for %q at %s",
-				server.Name, addr)
+			logger.Debug("Successfully performed flood-join for server at address",
+				"server", server.Name,
+				"address", addr,
+			)
 		}
 	}
 }

@@ -186,9 +186,9 @@ func (s *ServiceManager) AddService(req *addServiceRequest) error {
 	s.services[sid] = watch

 	if updating {
-		s.agent.logger.Printf("[DEBUG] agent.manager: updated local registration for service %q", service.ID)
+		s.agent.logger.Debug("updated local registration for service", "service", service.ID)
 	} else {
-		s.agent.logger.Printf("[DEBUG] agent.manager: added local registration for service %q", service.ID)
+		s.agent.logger.Debug("added local registration for service", "service", service.ID)
 	}

 	return nil
@@ -365,7 +365,7 @@ func (w *serviceConfigWatch) runWatch(wg *sync.WaitGroup) {
 			return
 		case event := <-w.updateCh:
 			if err := w.handleUpdate(event); err != nil {
-				w.agent.logger.Printf("[ERR] agent.manager: error handling service update: %v", err)
+				w.agent.logger.Error("error handling service update", "error", err)
 			}
 		}
 	}
 }

@@ -3,7 +3,6 @@ package structs
 import (
 	"encoding/json"
 	"fmt"
-	"log"

 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
@@ -469,7 +468,7 @@ func (p *ExposePath) ToAPI() api.ExposePath {
 }

 // Finalize validates ExposeConfig and sets default values
-func (e *ExposeConfig) Finalize(l *log.Logger) {
+func (e *ExposeConfig) Finalize() {
 	for i := 0; i < len(e.Paths); i++ {
 		path := &e.Paths[i]

@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"math/rand"
 	"net/http/httptest"
 	"os"
@@ -17,6 +16,7 @@ import (
 	"time"

 	metrics "github.com/armon/go-metrics"
+	"github.com/hashicorp/go-hclog"
 	uuid "github.com/hashicorp/go-uuid"

 	"github.com/hashicorp/consul/agent/config"
@@ -24,7 +24,6 @@ import (
 	"github.com/hashicorp/consul/agent/consul"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/logger"
 	"github.com/hashicorp/consul/sdk/freeport"
 	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
@@ -62,9 +61,6 @@ type TestAgent struct {
 	// to os.Stderr.
 	LogOutput io.Writer

-	// LogWriter is used for streaming logs.
-	LogWriter *logger.LogWriter
-
 	// DataDir is the data directory which is used when Config.DataDir
 	// is not set. It is created automatically and removed when
 	// Shutdown() is called.
@@ -160,11 +156,16 @@ func (a *TestAgent) Start() (err error) {
 	if logOutput == nil {
 		logOutput = os.Stderr
 	}
-	agentLogger := log.New(logOutput, a.Name+" - ", log.LstdFlags|log.Lmicroseconds)
+
+	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
+		Name:   a.Name,
+		Level:  hclog.Debug,
+		Output: logOutput,
+	})

 	portsConfig, returnPortsFn := randomPortsSource(a.UseTLS)
 	a.returnPortsFn = returnPortsFn
-	a.Config = TestConfig(agentLogger,
+	a.Config = TestConfig(logger,
 		portsConfig,
 		config.Source{Name: a.Name, Format: "hcl", Data: a.HCL},
 		config.Source{Name: a.Name + ".data_dir", Format: "hcl", Data: hclDataDir},
@@ -197,14 +198,13 @@ func (a *TestAgent) Start() (err error) {
 		}
 	}

-	agent, err := New(a.Config, agentLogger)
+	agent, err := New(a.Config, logger)
 	if err != nil {
 		cleanupTmpDir()
 		return fmt.Errorf("Error creating agent: %s", err)
 	}

 	agent.LogOutput = logOutput
-	agent.LogWriter = a.LogWriter
 	agent.MemSink = metrics.NewInmemSink(1*time.Second, time.Minute)

 	id := string(a.Config.NodeID)
@@ -411,7 +411,7 @@ func NodeID() string {

 // TestConfig returns a unique default configuration for testing an
 // agent.
-func TestConfig(logger *log.Logger, sources ...config.Source) *config.RuntimeConfig {
+func TestConfig(logger hclog.Logger, sources ...config.Source) *config.RuntimeConfig {
 	nodeID := NodeID()
 	testsrc := config.Source{
 		Name: "test",
@@ -450,7 +450,7 @@ func TestConfig(logger *log.Logger, sources ...config.Source) *config.RuntimeCon
 	}

 	for _, w := range b.Warnings {
-		logger.Printf("[WARN] %s", w)
+		logger.Warn(w)
 	}

 	// Effectively disables the delay after root rotation before requesting CSRs

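TestAgent.Start above now builds its logger with hclog.NewInterceptLogger instead of log.New. A standalone sketch of constructing such a logger with the same options (name, debug level, explicit output writer); the agent name and the log line are placeholders, not values from the commit.

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// An intercept logger is assumed to allow additional sinks to be
	// registered later (for example by a log-streaming endpoint); by default
	// it simply writes debug-level output to the writer given here.
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:   "test-agent",
		Level:  hclog.Debug,
		Output: os.Stderr,
	})
	logger.Info("agent starting", "node", "node0")
}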
@@ -113,7 +113,7 @@ func (a *Agent) handleEvents() {
 			// Decode the event
 			msg := new(UserEvent)
 			if err := decodeMsgPack(e.Payload, msg); err != nil {
-				a.logger.Printf("[ERR] agent: Failed to decode event: %v", err)
+				a.logger.Error("Failed to decode event", "error", err)
 				continue
 			}
 			msg.LTime = uint64(e.LTime)
@@ -136,16 +136,21 @@ func (a *Agent) handleEvents() {
 func (a *Agent) shouldProcessUserEvent(msg *UserEvent) bool {
 	// Check the version
 	if msg.Version > userEventMaxVersion {
-		a.logger.Printf("[WARN] agent: Event version %d may have unsupported features (%s)",
-			msg.Version, msg.Name)
+		a.logger.Warn("Event version may have unsupported features",
+			"version", msg.Version,
+			"event", msg.Name,
+		)
 	}

 	// Apply the filters
 	if msg.NodeFilter != "" {
 		re, err := regexp.Compile(msg.NodeFilter)
 		if err != nil {
-			a.logger.Printf("[ERR] agent: Failed to parse node filter '%s' for event '%s': %v",
-				msg.NodeFilter, msg.Name, err)
+			a.logger.Error("Failed to parse node filter for event",
+				"filter", msg.NodeFilter,
+				"event", msg.Name,
+				"error", err,
+			)
 			return false
 		}
 		if !re.MatchString(a.config.NodeName) {
@@ -156,8 +161,11 @@ func (a *Agent) shouldProcessUserEvent(msg *UserEvent) bool {
 	if msg.ServiceFilter != "" {
 		re, err := regexp.Compile(msg.ServiceFilter)
 		if err != nil {
-			a.logger.Printf("[ERR] agent: Failed to parse service filter '%s' for event '%s': %v",
-				msg.ServiceFilter, msg.Name, err)
+			a.logger.Error("Failed to parse service filter for event",
+				"filter", msg.ServiceFilter,
+				"event", msg.Name,
+				"error", err,
+			)
 			return false
 		}

@@ -165,8 +173,11 @@ func (a *Agent) shouldProcessUserEvent(msg *UserEvent) bool {
 	if msg.TagFilter != "" {
 		re, err := regexp.Compile(msg.TagFilter)
 		if err != nil {
-			a.logger.Printf("[ERR] agent: Failed to parse tag filter '%s' for event '%s': %v",
-				msg.TagFilter, msg.Name, err)
+			a.logger.Error("Failed to parse tag filter for event",
+				"filter", msg.TagFilter,
+				"event", msg.Name,
+				"error", err,
+			)
 			return false
 		}
 		tagRe = re
@@ -210,13 +221,19 @@ func (a *Agent) ingestUserEvent(msg *UserEvent) {
 	switch msg.Name {
 	case remoteExecName:
 		if a.config.DisableRemoteExec {
-			a.logger.Printf("[INFO] agent: ignoring remote exec event (%s), disabled.", msg.ID)
+			a.logger.Info("ignoring remote exec event, disabled.",
+				"event_name", msg.Name,
+				"event_id", msg.ID,
+			)
 		} else {
 			go a.handleRemoteExec(msg)
 		}
 		return
 	default:
-		a.logger.Printf("[DEBUG] agent: new event: %s (%s)", msg.Name, msg.ID)
+		a.logger.Debug("new event",
+			"event_name", msg.Name,
+			"event_id", msg.ID,
+		)
 	}

 	a.eventLock.Lock()

@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"os"
 	osexec "os/exec"
@@ -16,6 +15,7 @@ import (
 	"github.com/hashicorp/consul/agent/exec"
 	"github.com/hashicorp/consul/api/watch"
 	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-hclog"
 	"golang.org/x/net/context"
 )

@@ -27,7 +27,7 @@ const (
 )

 // makeWatchHandler returns a handler for the given watch
-func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFunc {
+func makeWatchHandler(logger hclog.Logger, handler interface{}) watch.HandlerFunc {
 	var args []string
 	var script string

@@ -41,7 +41,6 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun
 		panic(fmt.Errorf("unknown handler type %T", handler))
 	}

-	logger := log.New(logOutput, "", log.LstdFlags)
 	fn := func(idx uint64, data interface{}) {
 		// Create the command
 		var cmd *osexec.Cmd
@@ -53,7 +52,7 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun
 			cmd, err = exec.Script(script)
 		}
 		if err != nil {
-			logger.Printf("[ERR] agent: Failed to setup watch: %v", err)
+			logger.Error("Failed to setup watch", "error", err)
 			return
 		}

@@ -70,14 +69,20 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun
 		var inp bytes.Buffer
 		enc := json.NewEncoder(&inp)
 		if err := enc.Encode(data); err != nil {
-			logger.Printf("[ERR] agent: Failed to encode data for watch '%v': %v", handler, err)
+			logger.Error("Failed to encode data for watch",
+				"watch", handler,
+				"error", err,
+			)
 			return
 		}
 		cmd.Stdin = &inp

 		// Run the handler
 		if err := cmd.Run(); err != nil {
-			logger.Printf("[ERR] agent: Failed to run watch handler '%v': %v", handler, err)
+			logger.Error("Failed to run watch handler",
+				"watch_handler", handler,
+				"error", err,
+			)
 		}

 		// Get the output, add a message about truncation
@@ -88,14 +93,15 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun
 		}

 		// Log the output
-		logger.Printf("[DEBUG] agent: watch handler '%v' output: %s", handler, outputStr)
+		logger.Debug("watch handler output",
+			"watch_handler", handler,
+			"output", outputStr,
+		)
 	}
 	return fn
 }

-func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig) watch.HandlerFunc {
-	logger := log.New(logOutput, "", log.LstdFlags)
+func makeHTTPWatchHandler(logger hclog.Logger, config *watch.HttpHandlerConfig) watch.HandlerFunc {

 	fn := func(idx uint64, data interface{}) {
 		trans := cleanhttp.DefaultTransport()

@@ -121,13 +127,16 @@ func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig)
 		var inp bytes.Buffer
 		enc := json.NewEncoder(&inp)
 		if err := enc.Encode(data); err != nil {
-			logger.Printf("[ERR] agent: Failed to encode data for http watch '%s': %v", config.Path, err)
+			logger.Error("Failed to encode data for http watch",
+				"watch", config.Path,
+				"error", err,
+			)
 			return
 		}

 		req, err := http.NewRequest(config.Method, config.Path, &inp)
 		if err != nil {
-			logger.Printf("[ERR] agent: Failed to setup http watch: %v", err)
+			logger.Error("Failed to setup http watch", "error", err)
 			return
 		}
 		req = req.WithContext(ctx)
@@ -140,7 +149,10 @@ func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig)
 		}
 		resp, err := httpClient.Do(req)
 		if err != nil {
-			logger.Printf("[ERR] agent: Failed to invoke http watch handler '%s': %v", config.Path, err)
+			logger.Error("Failed to invoke http watch handler",
+				"watch", config.Path,
+				"error", err,
+			)
 			return
 		}
 		defer resp.Body.Close()
@@ -158,10 +170,16 @@ func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig)

 		if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
 			// Log the output
-			logger.Printf("[TRACE] agent: http watch handler '%s' output: %s", config.Path, outputStr)
+			logger.Trace("http watch handler output",
+				"watch", config.Path,
+				"output", outputStr,
+			)
 		} else {
-			logger.Printf("[ERR] agent: http watch handler '%s' got '%s' with output: %s",
-				config.Path, resp.Status, outputStr)
+			logger.Error("http watch handler failed with output",
+				"watch", config.Path,
+				"status", resp.Status,
+				"output", outputStr,
+			)
 		}
 	}
 	return fn

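The watch-handler changes above follow the same translation as the rest of the commit: a Printf-style message with the level embedded in the text becomes a leveled method call with alternating key/value pairs. A small hedged example of the before/after shape; the path and error values are made up for illustration.

package main

import (
	"errors"

	"github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.Default()
	err := errors.New("connection refused")
	path := "/watch/handler" // illustrative value

	// Before: level and values were baked into one formatted string, e.g.
	//   logger.Printf("[ERR] agent: Failed to invoke http watch handler '%s': %v", path, err)
	// After: the level is a method and values are alternating key/value pairs,
	// which the JSON output mode can emit as structured fields.
	logger.Error("Failed to invoke http watch handler",
		"watch", path,
		"error", err,
	)
}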
@@ -9,6 +9,7 @@ import (
 	"time"

 	"github.com/hashicorp/consul/api/watch"
+	"github.com/hashicorp/consul/sdk/testutil"
 )

 func TestMakeWatchHandler(t *testing.T) {
@@ -16,7 +17,7 @@ func TestMakeWatchHandler(t *testing.T) {
 	defer os.Remove("handler_out")
 	defer os.Remove("handler_index_out")
 	script := "bash -c 'echo $CONSUL_INDEX >> handler_index_out && cat >> handler_out'"
-	handler := makeWatchHandler(os.Stderr, script)
+	handler := makeWatchHandler(testutil.Logger(t), script)
 	handler(100, []string{"foo", "bar", "baz"})
 	raw, err := ioutil.ReadFile("handler_out")
 	if err != nil {
@@ -61,6 +62,6 @@ func TestMakeHTTPWatchHandler(t *testing.T) {
 		Header: map[string][]string{"X-Custom": {"abc", "def"}},
 		Timeout: time.Minute,
 	}
-	handler := makeHTTPWatchHandler(os.Stderr, &config)
+	handler := makeHTTPWatchHandler(testutil.Logger(t), &config)
 	handler(100, []string{"foo", "bar", "baz"})
 }