From 673bd69f36c8a09daee7c552310a1480aff086bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Cruz?= Date: Fri, 21 Aug 2020 19:05:25 +0100 Subject: [PATCH 01/73] Decrease test flakiness Fix flaky TestACLResolver_Client/Concurrent-Token-Resolve and TestCacheNotifyPolling --- agent/cache/testing.go | 4 ++-- agent/cache/watch_test.go | 2 +- agent/consul/acl_test.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/agent/cache/testing.go b/agent/cache/testing.go index c96612d94..90a0d7bef 100644 --- a/agent/cache/testing.go +++ b/agent/cache/testing.go @@ -60,7 +60,7 @@ func TestCacheNotifyChResult(t testing.T, ch <-chan UpdateEvent, expected ...Upd } got := make([]UpdateEvent, 0, expectLen) - timeoutCh := time.After(50 * time.Millisecond) + timeoutCh := time.After(75 * time.Millisecond) OUT: for { @@ -74,7 +74,7 @@ OUT: } case <-timeoutCh: - t.Fatalf("got %d results on chan in 50ms, want %d", len(got), expectLen) + t.Fatalf("timeout while waiting for result: got %d results on chan, want %d", len(got), expectLen) } } diff --git a/agent/cache/watch_test.go b/agent/cache/watch_test.go index 771a78359..a0bc7be75 100644 --- a/agent/cache/watch_test.go +++ b/agent/cache/watch_test.go @@ -258,7 +258,7 @@ func TestCacheNotifyPolling(t *testing.T) { } require.Equal(events[0].Result, 42) - require.Equal(events[0].Meta.Hit, false) + require.Equal(events[0].Meta.Hit && events[1].Meta.Hit, false) require.Equal(events[0].Meta.Index, uint64(1)) require.True(events[0].Meta.Age < 50*time.Millisecond) require.NoError(events[0].Err) diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 8e47e0032..e2cc884e6 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -1639,8 +1639,8 @@ func TestACLResolver_Client(t *testing.T) { // effectively disable caching - so the only way we end up with 1 token read is if they were // being resolved concurrently config.Config.ACLTokenTTL = 0 * time.Second - config.Config.ACLPolicyTTL = 30 * 
time.Millisecond - config.Config.ACLRoleTTL = 30 * time.Millisecond + config.Config.ACLPolicyTTL = 30 * time.Second + config.Config.ACLRoleTTL = 30 * time.Second config.Config.ACLDownPolicy = "extend-cache" }) From dd385f05e6297c5372663b7896d6fd084e823a50 Mon Sep 17 00:00:00 2001 From: Pierre Souchay Date: Mon, 24 Aug 2020 23:33:10 +0200 Subject: [PATCH 02/73] Ensure that Cache options are reloaded when `consul reload` is performed. This will apply cache throttling parameters are properly applied: * cache.EntryFetchMaxBurst * cache.EntryFetchRate When values are updated, a log is displayed in info. --- agent/agent.go | 6 ++++++ agent/cache/cache.go | 33 ++++++++++++++++++++++++++++++--- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index bc676b0bf..ab240e0af 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -3748,6 +3748,12 @@ func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error { return err } + if a.cache.ReloadOptions(newCfg.Cache) { + a.logger.Info("Cache options have been updated") + } else { + a.logger.Debug("Cache options have not been modified") + } + // Update filtered metrics metrics.UpdateFilter(newCfg.Telemetry.AllowedPrefixes, newCfg.Telemetry.BlockedPrefixes) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 44aeb3a04..ed32527e4 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -144,16 +144,21 @@ type Options struct { EntryFetchRate rate.Limit } -// New creates a new cache with the given RPC client and reasonable defaults. -// Further settings can be tweaked on the returned value. 
-func New(options Options) *Cache { +// applyDefaultValuesOnOptions set default values on options and returned updated value +func applyDefaultValuesOnOptions(options Options) Options { if options.EntryFetchRate == 0.0 { options.EntryFetchRate = DefaultEntryFetchRate } if options.EntryFetchMaxBurst == 0 { options.EntryFetchMaxBurst = DefaultEntryFetchMaxBurst } + return options +} +// New creates a new cache with the given RPC client and reasonable defaults. +// Further settings can be tweaked on the returned value. +func New(options Options) *Cache { + options = applyDefaultValuesOnOptions(options) // Initialize the heap. The buffer of 1 is really important because // its possible for the expiry loop to trigger the heap to update // itself and it'd block forever otherwise. @@ -234,6 +239,28 @@ func (c *Cache) RegisterType(n string, typ Type) { c.types[n] = typeEntry{Name: n, Type: typ, Opts: &opts} } +// ReloadOptions updates the cache with the new options +// return true if Cache is updated, false if already up to date +func (c *Cache) ReloadOptions(options Options) bool { + options = applyDefaultValuesOnOptions(options) + if c.options.EntryFetchRate != options.EntryFetchRate || c.options.EntryFetchMaxBurst != options.EntryFetchMaxBurst { + c.entriesLock.RLock() + defer c.entriesLock.RUnlock() + for _, entry := range c.entries { + if c.options.EntryFetchRate != options.EntryFetchRate { + entry.FetchRateLimiter.SetLimit(options.EntryFetchRate) + } + if c.options.EntryFetchMaxBurst != options.EntryFetchMaxBurst { + entry.FetchRateLimiter.SetBurst(options.EntryFetchMaxBurst) + } + } + c.options.EntryFetchRate = options.EntryFetchRate + c.options.EntryFetchMaxBurst = options.EntryFetchMaxBurst + return true + } + return false +} + // Get loads the data for the given type and request. If data satisfying the // minimum index is present in the cache, it is returned immediately. 
Otherwise, // this will block until the data is available or the request timeout is From 084d0e8015146567b56b2acfac9e7662a6eaf2f4 Mon Sep 17 00:00:00 2001 From: Pierre Souchay Date: Thu, 27 Aug 2020 08:51:19 +0200 Subject: [PATCH 03/73] Added `options.Equals()` and minor fixes indentation fixes --- agent/cache/cache.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index ed32527e4..b92feb5a5 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -144,6 +144,11 @@ type Options struct { EntryFetchRate rate.Limit } +// Equal return true if both options are equivalent +func (o Options) Equal(other Options) bool { + return o.EntryFetchMaxBurst == other.EntryFetchMaxBurst && o.EntryFetchRate == other.EntryFetchRate +} + // applyDefaultValuesOnOptions set default values on options and returned updated value func applyDefaultValuesOnOptions(options Options) Options { if options.EntryFetchRate == 0.0 { @@ -243,7 +248,8 @@ func (c *Cache) RegisterType(n string, typ Type) { // return true if Cache is updated, false if already up to date func (c *Cache) ReloadOptions(options Options) bool { options = applyDefaultValuesOnOptions(options) - if c.options.EntryFetchRate != options.EntryFetchRate || c.options.EntryFetchMaxBurst != options.EntryFetchMaxBurst { + modified := !options.Equal(c.options) + if modified { c.entriesLock.RLock() defer c.entriesLock.RUnlock() for _, entry := range c.entries { @@ -256,9 +262,8 @@ func (c *Cache) ReloadOptions(options Options) bool { } c.options.EntryFetchRate = options.EntryFetchRate c.options.EntryFetchMaxBurst = options.EntryFetchMaxBurst - return true } - return false + return modified } // Get loads the data for the given type and request. 
If data satisfying the From 4983e093a00e9b9e9470ac8f0b5e69e721f8f54c Mon Sep 17 00:00:00 2001 From: Pierre Souchay Date: Thu, 27 Aug 2020 16:41:20 +0200 Subject: [PATCH 04/73] Tests that changes in rate limit are taken into account by agent --- agent/agent_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/agent/agent_test.go b/agent/agent_test.go index 94bb74f7b..33dfa311e 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -43,6 +43,7 @@ import ( "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/time/rate" "gopkg.in/square/go-jose.v2/jwt" ) @@ -765,10 +766,15 @@ func TestCacheRateLimit(test *testing.T) { test.Run(fmt.Sprintf("rate_limit_at_%v", currentTest.rateLimit), func(t *testing.T) { tt := currentTest t.Parallel() - a := NewTestAgent(t, fmt.Sprintf("cache = { entry_fetch_rate = %v, entry_fetch_max_burst = 1 }", tt.rateLimit)) + a := NewTestAgent(t, "cache = { entry_fetch_rate = 1, entry_fetch_max_burst = 1 }") defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") + cfg := a.config + require.Equal(t, rate.Limit(1), a.config.Cache.EntryFetchRate) + cfg.Cache.EntryFetchRate = rate.Limit(tt.rateLimit) + a.reloadConfigInternal(cfg) + require.Equal(t, rate.Limit(tt.rateLimit), a.config.Cache.EntryFetchRate) var wg sync.WaitGroup stillProcessing := true From 85de324a867c09417c86b8aeb793c386e6627fbe Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 27 Aug 2020 11:53:49 -0400 Subject: [PATCH 05/73] Retroactively add changelog for PR 8537 --- .changelog/8537.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/8537.txt diff --git a/.changelog/8537.txt b/.changelog/8537.txt new file mode 100644 index 000000000..f2b92b26b --- /dev/null +++ b/.changelog/8537.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fixed a panic caused by an api request with Connect=null +``` From f92ae5e6ca894ae25a669b29dc383925f81c0ca0 Mon Sep 17 
00:00:00 2001 From: Pierre Souchay Date: Thu, 27 Aug 2020 18:14:05 +0200 Subject: [PATCH 06/73] Also test reload of EntryFetchMaxBurst --- agent/agent_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/agent/agent_test.go b/agent/agent_test.go index 33dfa311e..71c6e8741 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -766,15 +766,18 @@ func TestCacheRateLimit(test *testing.T) { test.Run(fmt.Sprintf("rate_limit_at_%v", currentTest.rateLimit), func(t *testing.T) { tt := currentTest t.Parallel() - a := NewTestAgent(t, "cache = { entry_fetch_rate = 1, entry_fetch_max_burst = 1 }") + a := NewTestAgent(t, "cache = { entry_fetch_rate = 1, entry_fetch_max_burst = 100 }") defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") cfg := a.config require.Equal(t, rate.Limit(1), a.config.Cache.EntryFetchRate) + require.Equal(t, 100, a.config.Cache.EntryFetchMaxBurst) cfg.Cache.EntryFetchRate = rate.Limit(tt.rateLimit) + cfg.Cache.EntryFetchMaxBurst = 1 a.reloadConfigInternal(cfg) require.Equal(t, rate.Limit(tt.rateLimit), a.config.Cache.EntryFetchRate) + require.Equal(t, 1, a.config.Cache.EntryFetchMaxBurst) var wg sync.WaitGroup stillProcessing := true From a8256b84bcf1e78d8421b9780a06f6ce4097e147 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 12 May 2020 14:27:50 -0400 Subject: [PATCH 07/73] logging: Remove t.Parallel from tests The tests all run fast enough that we do not get any advantage from using Parallel. The one test that was slow used a long sleep. Changing the sleep to a few milliseconds speeds up the test considerably. 
--- logging/logfile_test.go | 10 ++-------- logging/logger_test.go | 9 --------- logging/monitor/monitor_test.go | 5 ----- 3 files changed, 2 insertions(+), 22 deletions(-) diff --git a/logging/logfile_test.go b/logging/logfile_test.go index c1343d055..115f1bcff 100644 --- a/logging/logfile_test.go +++ b/logging/logfile_test.go @@ -11,12 +11,11 @@ import ( const ( testFileName = "Consul.log" - testDuration = 2 * time.Second + testDuration = 50 * time.Millisecond testBytes = 10 ) func TestLogFile_timeRotation(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, "LogWriterTime") logFile := LogFile{ fileName: testFileName, @@ -24,7 +23,7 @@ func TestLogFile_timeRotation(t *testing.T) { duration: testDuration, } logFile.Write([]byte("Hello World")) - time.Sleep(2 * time.Second) + time.Sleep(3 * testDuration) logFile.Write([]byte("Second File")) want := 2 if got, _ := ioutil.ReadDir(tempDir); len(got) != want { @@ -33,7 +32,6 @@ func TestLogFile_timeRotation(t *testing.T) { } func TestLogFile_openNew(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, "LogWriterOpen") logFile := LogFile{fileName: testFileName, logPath: tempDir, duration: testDuration} if err := logFile.openNew(); err != nil { @@ -46,7 +44,6 @@ func TestLogFile_openNew(t *testing.T) { } func TestLogFile_byteRotation(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, "LogWriterBytes") logFile := LogFile{ fileName: testFileName, @@ -64,7 +61,6 @@ func TestLogFile_byteRotation(t *testing.T) { } func TestLogFile_deleteArchives(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, "LogWriteDeleteArchives") logFile := LogFile{ fileName: testFileName, @@ -100,7 +96,6 @@ func TestLogFile_deleteArchives(t *testing.T) { } func TestLogFile_deleteArchivesDisabled(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, t.Name()) logFile := LogFile{ fileName: testFileName, @@ -121,7 +116,6 @@ func TestLogFile_deleteArchivesDisabled(t *testing.T) { } func 
TestLogFile_rotationDisabled(t *testing.T) { - t.Parallel() tempDir := testutil.TempDir(t, t.Name()) logFile := LogFile{ fileName: testFileName, diff --git a/logging/logger_test.go b/logging/logger_test.go index c6bea0919..babff2a1a 100644 --- a/logging/logger_test.go +++ b/logging/logger_test.go @@ -12,7 +12,6 @@ import ( ) func TestLogger_SetupBasic(t *testing.T) { - t.Parallel() require := require.New(t) cfg := Config{LogLevel: "INFO"} @@ -22,7 +21,6 @@ func TestLogger_SetupBasic(t *testing.T) { } func TestLogger_SetupInvalidLogLevel(t *testing.T) { - t.Parallel() cfg := Config{} _, err := Setup(cfg, nil) @@ -30,7 +28,6 @@ func TestLogger_SetupInvalidLogLevel(t *testing.T) { } func TestLogger_SetupLoggerErrorLevel(t *testing.T) { - t.Parallel() cases := []struct { desc string @@ -74,7 +71,6 @@ func TestLogger_SetupLoggerErrorLevel(t *testing.T) { } func TestLogger_SetupLoggerDebugLevel(t *testing.T) { - t.Parallel() require := require.New(t) cfg := Config{LogLevel: "DEBUG"} var buf bytes.Buffer @@ -93,7 +89,6 @@ func TestLogger_SetupLoggerDebugLevel(t *testing.T) { } func TestLogger_SetupLoggerWithName(t *testing.T) { - t.Parallel() require := require.New(t) cfg := Config{ LogLevel: "DEBUG", @@ -111,7 +106,6 @@ func TestLogger_SetupLoggerWithName(t *testing.T) { } func TestLogger_SetupLoggerWithJSON(t *testing.T) { - t.Parallel() require := require.New(t) cfg := Config{ LogLevel: "DEBUG", @@ -136,7 +130,6 @@ func TestLogger_SetupLoggerWithJSON(t *testing.T) { } func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { - t.Parallel() require := require.New(t) tmpDir := testutil.TempDir(t, t.Name()) @@ -153,7 +146,6 @@ func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { } func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) { - t.Parallel() require := require.New(t) cfg := Config{ @@ -169,7 +161,6 @@ func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) { } func TestLogger_SetupLoggerWithInValidLogPathPermission(t *testing.T) { - 
t.Parallel() require := require.New(t) tmpDir := "/tmp/" + t.Name() diff --git a/logging/monitor/monitor_test.go b/logging/monitor/monitor_test.go index d5101bed8..df289aa5e 100644 --- a/logging/monitor/monitor_test.go +++ b/logging/monitor/monitor_test.go @@ -10,7 +10,6 @@ import ( ) func TestMonitor_Start(t *testing.T) { - t.Parallel() require := require.New(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ @@ -41,7 +40,6 @@ func TestMonitor_Start(t *testing.T) { } func TestMonitor_Stop(t *testing.T) { - t.Parallel() require := require.New(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ @@ -82,7 +80,6 @@ func TestMonitor_Stop(t *testing.T) { } func TestMonitor_DroppedMessages(t *testing.T) { - t.Parallel() require := require.New(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ @@ -125,7 +122,6 @@ func TestMonitor_DroppedMessages(t *testing.T) { } func TestMonitor_ZeroBufSizeDefault(t *testing.T) { - t.Parallel() require := require.New(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ @@ -162,7 +158,6 @@ func TestMonitor_ZeroBufSizeDefault(t *testing.T) { } func TestMonitor_WriteStopped(t *testing.T) { - t.Parallel() require := require.New(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ From ee50b5516313003ef0df37060e501efeeb15f242 Mon Sep 17 00:00:00 2001 From: Pierre Souchay Date: Fri, 28 Aug 2020 13:03:58 +0200 Subject: [PATCH 08/73] Added Unit test for cache reloading --- agent/cache/cache_test.go | 59 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 54794f4c3..c2442ea7c 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "golang.org/x/time/rate" ) // Test a basic Get with no indexes (and therefore no blocking queries). 
@@ -1220,6 +1221,64 @@ func TestCacheGet_nonBlockingType(t *testing.T) { typ.AssertExpectations(t) } +// Test a get with an index set will wait until an index that is higher +// is set in the cache. +func TestCacheReload(t *testing.T) { + t.Parallel() + + typ1 := TestType(t) + defer typ1.AssertExpectations(t) + + c := New(Options{EntryFetchRate: rate.Limit(1), EntryFetchMaxBurst: 1}) + c.RegisterType("t1", typ1) + typ1.Mock.On("Fetch", mock.Anything, mock.Anything).Return(FetchResult{Value: 42, Index: 42}, nil).Maybe() + + require.False(t, c.ReloadOptions(Options{EntryFetchRate: rate.Limit(1), EntryFetchMaxBurst: 1}), "Value should not be reloaded") + + _, meta, err := c.Get(context.Background(), "t1", TestRequest(t, RequestInfo{Key: "hello1", MinIndex: uint64(1)})) + require.NoError(t, err) + require.Equal(t, meta.Index, uint64(42)) + + testEntry := func(t *testing.T, doTest func(t *testing.T, entry cacheEntry)) { + c.entriesLock.Lock() + tEntry, ok := c.types["t1"] + require.True(t, ok) + keyName := makeEntryKey("t1", "", "", "hello1") + ok, entryValid, entry := c.getEntryLocked(tEntry, keyName, RequestInfo{}) + require.True(t, ok) + require.True(t, entryValid) + doTest(t, entry) + c.entriesLock.Unlock() + + } + testEntry(t, func(t *testing.T, entry cacheEntry) { + require.Equal(t, entry.FetchRateLimiter.Limit(), rate.Limit(1)) + require.Equal(t, entry.FetchRateLimiter.Burst(), 1) + }) + + // Modify only rateLimit + require.True(t, c.ReloadOptions(Options{EntryFetchRate: rate.Limit(100), EntryFetchMaxBurst: 1})) + testEntry(t, func(t *testing.T, entry cacheEntry) { + require.Equal(t, entry.FetchRateLimiter.Limit(), rate.Limit(100)) + require.Equal(t, entry.FetchRateLimiter.Burst(), 1) + }) + + // Modify only Burst + require.True(t, c.ReloadOptions(Options{EntryFetchRate: rate.Limit(100), EntryFetchMaxBurst: 5})) + testEntry(t, func(t *testing.T, entry cacheEntry) { + require.Equal(t, entry.FetchRateLimiter.Limit(), rate.Limit(100)) + require.Equal(t, 
entry.FetchRateLimiter.Burst(), 5) + }) + + // Modify only Burst and Limit at the same time + require.True(t, c.ReloadOptions(Options{EntryFetchRate: rate.Limit(1000), EntryFetchMaxBurst: 42})) + + testEntry(t, func(t *testing.T, entry cacheEntry) { + require.Equal(t, entry.FetchRateLimiter.Limit(), rate.Limit(1000)) + require.Equal(t, entry.FetchRateLimiter.Burst(), 42) + }) +} + // TestCacheThrottle checks the assumptions for the cache throttling. It sets // up a cache with Options{EntryFetchRate: 10.0, EntryFetchMaxBurst: 1}, which // allows for 10req/s, or one request every 100ms. From d8c14b51a359247acddf9096f7715ea4d0a1104d Mon Sep 17 00:00:00 2001 From: Kenia <19161242+kaxcode@users.noreply.github.com> Date: Fri, 28 Aug 2020 09:21:03 -0400 Subject: [PATCH 09/73] ui: Redesign Node list page (#8567) * Create ConsulNodeList component * Implement ConsulNodeList and the new Search/Sort to Node List page * Minor styling fix to align the first icons in composite row * Fix-up and add tests for the redesigned Node List page * Add Leader to composite row for Node List page * Add test for node leader --- .../app/components/consul-node-list/index.hbs | 41 ++++ .../app/components/consul-node-list/index.js | 5 + ui-v2/app/controllers/dc/nodes/index.js | 18 +- ui-v2/app/initializers/search.js | 3 +- ui-v2/app/initializers/sort.js | 2 + ui-v2/app/models/node.js | 22 ++ ui-v2/app/routes/dc/nodes/index.js | 1 + ui-v2/app/sort/comparators/node.js | 37 ++++ .../styles/base/components/pill/layout.scss | 4 + .../app/styles/base/components/pill/skin.scss | 4 + .../app/styles/base/icons/base-variables.scss | 2 +- .../components/composite-row/layout.scss | 1 + ui-v2/app/styles/components/pill.scss | 3 +- ui-v2/app/templates/dc/nodes/index.hbs | 195 ++++++------------ .../components/catalog-filter.feature | 62 ------ .../acceptance/dc/nodes/empty-ids.feature | 7 +- ui-v2/tests/acceptance/dc/nodes/index.feature | 23 +-- .../acceptance/dc/nodes/navigation.feature | 16 ++ 
.../tests/acceptance/dc/nodes/sorting.feature | 73 +++++++ .../tests/acceptance/page-navigation.feature | 1 - .../steps/dc/nodes/navigation-steps.js | 10 + .../steps/dc/nodes/sorting-steps.js | 10 + .../acceptance/steps/nodes/sorting-steps.js | 10 + .../components/consul-node-list-test.js | 26 +++ ui-v2/tests/pages.js | 5 +- ui-v2/tests/pages/dc/nodes/index.js | 13 +- 26 files changed, 345 insertions(+), 249 deletions(-) create mode 100644 ui-v2/app/components/consul-node-list/index.hbs create mode 100644 ui-v2/app/components/consul-node-list/index.js create mode 100644 ui-v2/app/sort/comparators/node.js create mode 100644 ui-v2/tests/acceptance/dc/nodes/navigation.feature create mode 100644 ui-v2/tests/acceptance/dc/nodes/sorting.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/nodes/navigation-steps.js create mode 100644 ui-v2/tests/acceptance/steps/dc/nodes/sorting-steps.js create mode 100644 ui-v2/tests/acceptance/steps/nodes/sorting-steps.js create mode 100644 ui-v2/tests/integration/components/consul-node-list-test.js diff --git a/ui-v2/app/components/consul-node-list/index.hbs b/ui-v2/app/components/consul-node-list/index.hbs new file mode 100644 index 000000000..8b2c516e9 --- /dev/null +++ b/ui-v2/app/components/consul-node-list/index.hbs @@ -0,0 +1,41 @@ +{{#if (gt items.length 0)}} + + +
+
+ Health +
+
+ + {{#if (eq 'critical' item.Status)}} + At least one health check on this node is failing. + {{else if (eq 'warning' item.Status)}} + At least one health check on this node has a warning. + {{else if (eq 'passing' item.Status)}} + All health checks are passing. + {{else}} + There are no health checks. + {{/if}} + +
+
+ + {{item.Node}} + +
+ + {{#if (eq item.Address leader.Address)}} + Leader + {{/if}} +
+
+ +
+
{{item.Address}}
+
+
+
+{{/if}} diff --git a/ui-v2/app/components/consul-node-list/index.js b/ui-v2/app/components/consul-node-list/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-node-list/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/controllers/dc/nodes/index.js b/ui-v2/app/controllers/dc/nodes/index.js index 5454ee88a..a33e295d3 100644 --- a/ui-v2/app/controllers/dc/nodes/index.js +++ b/ui-v2/app/controllers/dc/nodes/index.js @@ -2,26 +2,10 @@ import Controller from '@ember/controller'; export default Controller.extend({ queryParams: { - filterBy: { - as: 'status', - }, + sortBy: 'sort', search: { as: 'filter', replace: true, }, }, - actions: { - hasStatus: function(status, checks) { - if (status === '') { - return true; - } - return checks.some(item => item.Status === status); - }, - isHealthy: function(checks) { - return !this.actions.isUnhealthy.apply(this, [checks]); - }, - isUnhealthy: function(checks) { - return checks.some(item => item.Status === 'critical' || item.Status === 'warning'); - }, - }, }); diff --git a/ui-v2/app/initializers/search.js b/ui-v2/app/initializers/search.js index 42b92fad5..e6ed797ab 100644 --- a/ui-v2/app/initializers/search.js +++ b/ui-v2/app/initializers/search.js @@ -23,8 +23,7 @@ export function initialize(application) { policy: policy(filterable), role: role(filterable), kv: kv(filterable), - healthyNode: node(filterable), - unhealthyNode: node(filterable), + node: node(filterable), serviceInstance: serviceNode(filterable), nodeservice: nodeService(filterable), service: service(filterable), diff --git a/ui-v2/app/initializers/sort.js b/ui-v2/app/initializers/sort.js index 4649bcfb6..6b173481b 100644 --- a/ui-v2/app/initializers/sort.js +++ b/ui-v2/app/initializers/sort.js @@ -6,6 +6,7 @@ import token from 'consul-ui/sort/comparators/token'; import role from 'consul-ui/sort/comparators/role'; 
import policy from 'consul-ui/sort/comparators/policy'; import nspace from 'consul-ui/sort/comparators/nspace'; +import node from 'consul-ui/sort/comparators/node'; export function initialize(container) { // Service-less injection using private properties at a per-project level @@ -19,6 +20,7 @@ export function initialize(container) { role: role(), policy: policy(), nspace: nspace(), + node: node(), }; Sort.reopen({ comparator: function(type) { diff --git a/ui-v2/app/models/node.js b/ui-v2/app/models/node.js index 2081f6f71..f21186f84 100644 --- a/ui-v2/app/models/node.js +++ b/ui-v2/app/models/node.js @@ -1,5 +1,6 @@ import Model from 'ember-data/model'; import attr from 'ember-data/attr'; +import { computed } from '@ember/object'; export const PRIMARY_KEY = 'uid'; export const SLUG_KEY = 'ID'; @@ -20,4 +21,25 @@ export default Model.extend({ Coord: attr(), SyncTime: attr('number'), meta: attr(), + Status: computed('Checks.[]', 'ChecksCritical', 'ChecksPassing', 'ChecksWarning', function() { + switch (true) { + case this.ChecksCritical !== 0: + return 'critical'; + case this.ChecksWarning !== 0: + return 'warning'; + case this.ChecksPassing !== 0: + return 'passing'; + default: + return 'empty'; + } + }), + ChecksCritical: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'critical').length; + }), + ChecksPassing: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'passing').length; + }), + ChecksWarning: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'warning').length; + }), }); diff --git a/ui-v2/app/routes/dc/nodes/index.js b/ui-v2/app/routes/dc/nodes/index.js index 783f1efa4..472366e5b 100644 --- a/ui-v2/app/routes/dc/nodes/index.js +++ b/ui-v2/app/routes/dc/nodes/index.js @@ -6,6 +6,7 @@ export default Route.extend({ repo: service('repository/node'), data: service('data-source/service'), queryParams: { + sortBy: 'sort', search: { as: 'filter', 
replace: true, diff --git a/ui-v2/app/sort/comparators/node.js b/ui-v2/app/sort/comparators/node.js new file mode 100644 index 000000000..a5c906d10 --- /dev/null +++ b/ui-v2/app/sort/comparators/node.js @@ -0,0 +1,37 @@ +export default () => key => { + if (key.startsWith('Status:')) { + return function(serviceA, serviceB) { + const [, dir] = key.split(':'); + let a, b; + if (dir === 'asc') { + b = serviceA; + a = serviceB; + } else { + a = serviceA; + b = serviceB; + } + switch (true) { + case a.ChecksCritical > b.ChecksCritical: + return 1; + case a.ChecksCritical < b.ChecksCritical: + return -1; + default: + switch (true) { + case a.ChecksWarning > b.ChecksWarning: + return 1; + case a.ChecksWarning < b.ChecksWarning: + return -1; + default: + switch (true) { + case a.ChecksPassing < b.ChecksPassing: + return 1; + case a.ChecksPassing > b.ChecksPassing: + return -1; + } + } + return 0; + } + }; + } + return key; +}; diff --git a/ui-v2/app/styles/base/components/pill/layout.scss b/ui-v2/app/styles/base/components/pill/layout.scss index 6f97559ae..56cf8623b 100644 --- a/ui-v2/app/styles/base/components/pill/layout.scss +++ b/ui-v2/app/styles/base/components/pill/layout.scss @@ -27,5 +27,9 @@ width: 14px; height: 14px; margin-right: 2px; + margin-top: 1px; position: relative; } +%reduced-pill.leader::before { + margin-right: 4px; +} diff --git a/ui-v2/app/styles/base/components/pill/skin.scss b/ui-v2/app/styles/base/components/pill/skin.scss index 31d1ed0b9..2ba8e1ff9 100644 --- a/ui-v2/app/styles/base/components/pill/skin.scss +++ b/ui-v2/app/styles/base/components/pill/skin.scss @@ -34,3 +34,7 @@ @extend %with-gateway-mask; background-color: $gray-500; } +%reduced-pill.leader::before { + @extend %with-star-outline-mask; + background-color: $gray-500; +} diff --git a/ui-v2/app/styles/base/icons/base-variables.scss b/ui-v2/app/styles/base/icons/base-variables.scss index d3e2e48d9..09fc28c59 100644 --- a/ui-v2/app/styles/base/icons/base-variables.scss +++ 
b/ui-v2/app/styles/base/icons/base-variables.scss @@ -156,7 +156,7 @@ $settings-svg: url('data:image/svg+xml;charset=UTF-8,'); $sort-svg: url('data:image/svg+xml;charset=UTF-8,'); $star-fill-svg: url('data:image/svg+xml;charset=UTF-8,'); -$star-outline-svg: url('data:image/svg+xml;charset=UTF-8,'); +$star-outline-svg: url('data:image/svg+xml;charset=UTF-8,'); $star-svg: url('data:image/svg+xml;charset=UTF-8,'); $sub-left-svg: url('data:image/svg+xml;charset=UTF-8,'); $sub-right-svg: url('data:image/svg+xml;charset=UTF-8,'); diff --git a/ui-v2/app/styles/components/composite-row/layout.scss b/ui-v2/app/styles/components/composite-row/layout.scss index 2f08ea602..289dd4f3e 100644 --- a/ui-v2/app/styles/components/composite-row/layout.scss +++ b/ui-v2/app/styles/components/composite-row/layout.scss @@ -44,6 +44,7 @@ } %composite-row-icon { margin-right: 6px; + margin-left: -2px; } %composite-row-icon dt { display: none; diff --git a/ui-v2/app/styles/components/pill.scss b/ui-v2/app/styles/components/pill.scss index af6ecd29c..c9222f558 100644 --- a/ui-v2/app/styles/components/pill.scss +++ b/ui-v2/app/styles/components/pill.scss @@ -6,7 +6,8 @@ td strong { span.policy-service-identity, span.policy-node-identity, .consul-external-source, -.consul-kind { +.consul-kind, +.leader { @extend %reduced-pill; } span.policy-service-identity::before, diff --git a/ui-v2/app/templates/dc/nodes/index.hbs b/ui-v2/app/templates/dc/nodes/index.hbs index 3ecdf5cf9..c5b38372b 100644 --- a/ui-v2/app/templates/dc/nodes/index.hbs +++ b/ui-v2/app/templates/dc/nodes/index.hbs @@ -1,137 +1,74 @@ {{title 'Nodes'}} -{{#let (selectable-key-values - (array "" "All (Any Status)") - (array "critical" "Critical Checks") - (array "warning" "Warning Checks") - (array "passing" "Passing Checks") - selected=filterBy - ) - as |filter| -}} - - -

- Nodes {{format-number items.length}} total -

- -
- -{{#if (gt items.length 0) }} +{{#let (or sortBy "Node:asc") as |sort|}} + + +

+ Nodes {{format-number items.length}} total +

+ +
+ + {{#if (gt items.length 0) }} -{{/if}} - - -{{#let (filter-by "Checks" (action "isUnhealthy") items) as |unhealthy|}} - {{#if (gt unhealthy.length 0) }} -
-

Unhealthy Nodes

-
- {{! think about 2 differing views here }} -
    - - - {{#each unhealthy as |item|}} - - - {{#if (eq item.Address leader.Address)}} - Leader - {{/if}} - - - {{/each}} - - -
  • - - -

    No nodes found

    -
    - -

    - There don't seem to be any nodes matching that search. -

    -
    -
    -
  • -
    -
    -
-
-
- {{/if}} -{{/let}} -{{#let (filter-by "Checks" (action "isHealthy") items) as |healthy|}} - {{#if (gt healthy.length 0) }} -
-

Healthy Nodes

- - - - - - {{#if (eq item.Address leader.Address)}} - Leader - {{/if}} - - - - - - - -

No nodes found

-
- -

- There don't seem to be any nodes matching that search. -

-
-
-
-
-
- {{/if}} -{{/let}} -{{#if (eq items.length 0) }} - - -

Welcome to Nodes

+ class="with-sort" + > + + + + + {{#let (from-entries (array + (array "Node:asc" "A to Z") + (array "Node:desc" "Z to A") + (array "Status:asc" "Unhealthy to Healthy") + (array "Status:desc" "Healthy to Unhealthy") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + + {{/let}} + + - -

- There don't seem to be any nodes, or you may not have access to view nodes yet. -

+ + {{/if}} +
+ + {{#let (sort-by (comparator 'node' sort) items) as |sorted|}} + + + -
-{{/if}} -
-
+ + + +

+ There don't seem to be any registered nodes, or you may not have access to view nodes yet. +

+
+
+
+ + {{/let}} +
+
{{/let}} \ No newline at end of file diff --git a/ui-v2/tests/acceptance/components/catalog-filter.feature b/ui-v2/tests/acceptance/components/catalog-filter.feature index 68b9e02c5..82a4b14b4 100644 --- a/ui-v2/tests/acceptance/components/catalog-filter.feature +++ b/ui-v2/tests/acceptance/components/catalog-filter.feature @@ -3,68 +3,6 @@ # to use the name filter UI also, then they can stay together @setupApplicationTest Feature: components / catalog-filter - Scenario: Filtering [Model] - Given 1 datacenter model with the value "dc-1" - And 4 service models from yaml - --- - - ChecksPassing: 1 - ChecksWarning: 0 - ChecksCritical: 0 - - ChecksPassing: 0 - ChecksWarning: 1 - ChecksCritical: 0 - - ChecksPassing: 0 - ChecksWarning: 0 - ChecksCritical: 1 - - ChecksPassing: 1 - ChecksWarning: 0 - ChecksCritical: 0 - --- - And 4 node models from yaml - --- - - Checks: - - Status: passing - - Checks: - - Status: warning - - Checks: - - Status: critical - - Checks: - - Status: passing - --- - When I visit the [Page] page for yaml - --- - dc: dc-1 - --- - Then the url should be [Url] - - Then I see 4 [Model] models - And I see allIsSelected on the filter - - When I click passing on the filter - And I see passingIsSelected on the filter - And I see 2 [Model] models - - When I click warning on the filter - And I see warningIsSelected on the filter - And I see 1 [Model] model - - When I click critical on the filter - And I see criticalIsSelected on the filter - And I see 1 [Model] model - - When I click all on the filter - And I see allIsSelected on the filter - Then I fill in with yaml - --- - s: [Model]-0 - --- - And I see 1 [Model] model with the name "[Model]-0" - - Where: - ------------------------------------------------- - | Model | Page | Url | - | node | nodes | /dc-1/nodes | - ------------------------------------------------- Scenario: Filtering [Model] in [Page] Given 1 datacenter model with the value "dc1" And 1 node model from yaml diff --git 
a/ui-v2/tests/acceptance/dc/nodes/empty-ids.feature b/ui-v2/tests/acceptance/dc/nodes/empty-ids.feature index e4807d294..52df760af 100644 --- a/ui-v2/tests/acceptance/dc/nodes/empty-ids.feature +++ b/ui-v2/tests/acceptance/dc/nodes/empty-ids.feature @@ -20,14 +20,11 @@ Feature: dc / nodes / empty-ids: Hedge for if nodes come in over the API with no dc: dc-1 --- Then the url should be /dc-1/nodes - Then I see name on the nodes like yaml + Then I see name on the nodes vertically like yaml --- - name-1 - name-2 - name-3 - name-4 - name-5 - -@ignore - Scenario: Visually comparing - Then the ".unhealthy" element should look like the "/node_modules/@hashicorp/consul-testing-extras/fixtures/dc/nodes/empty-ids.png" image + --- \ No newline at end of file diff --git a/ui-v2/tests/acceptance/dc/nodes/index.feature b/ui-v2/tests/acceptance/dc/nodes/index.feature index e42bb6821..07a4f3ef4 100644 --- a/ui-v2/tests/acceptance/dc/nodes/index.feature +++ b/ui-v2/tests/acceptance/dc/nodes/index.feature @@ -16,7 +16,7 @@ Feature: dc / nodes / index Then the url should be /dc-1/nodes And the title should be "Nodes - Consul" Then I see 3 node models - Scenario: Seeing the leader in unhealthy listing + Scenario: Seeing the leader in node listing Given 3 node models from yaml --- - Address: 211.245.86.75 @@ -32,24 +32,7 @@ Feature: dc / nodes / index --- Then the url should be /dc-1/nodes Then I see 3 node models - And I see leader on the unHealthyNodes - Scenario: Seeing the leader in healthy listing - Given 3 node models from yaml - --- - - Address: 211.245.86.75 - Checks: - - Status: passing - Name: Passing check - - Address: 10.0.0.1 - - Address: 10.0.0.3 - --- - When I visit the nodes page for yaml - --- - dc: dc-1 - --- - Then the url should be /dc-1/nodes - Then I see 3 node models - And I see leader on the healthyNodes + And I see leader on the nodes.0 Scenario: Searching the nodes with name and IP address Given 3 node models from yaml --- @@ -76,4 +59,4 @@ Feature: dc / nodes 
/ index s: 10.0.0.1 --- And I see 1 node model - And I see 1 node model with the name "node-02" + And I see 1 node model with the name "node-02" \ No newline at end of file diff --git a/ui-v2/tests/acceptance/dc/nodes/navigation.feature b/ui-v2/tests/acceptance/dc/nodes/navigation.feature new file mode 100644 index 000000000..116e26ff8 --- /dev/null +++ b/ui-v2/tests/acceptance/dc/nodes/navigation.feature @@ -0,0 +1,16 @@ +@setupApplicationTest +Feature: dc / nodes / navigation + Scenario: Clicking a node in the listing and back again + Given 1 datacenter model with the value "dc-1" + And 3 node models + When I visit the nodes page for yaml + --- + dc: dc-1 + --- + Then the url should be /dc-1/nodes + And the title should be "Nodes - Consul" + Then I see 3 node models + When I click node on the nodes + And I click "[data-test-back]" + Then the url should be /dc-1/nodes + diff --git a/ui-v2/tests/acceptance/dc/nodes/sorting.feature b/ui-v2/tests/acceptance/dc/nodes/sorting.feature new file mode 100644 index 000000000..d23795d92 --- /dev/null +++ b/ui-v2/tests/acceptance/dc/nodes/sorting.feature @@ -0,0 +1,73 @@ +@setupApplicationTest +Feature: dc / nodes / sorting + Scenario: + Given 1 datacenter model with the value "dc-1" + And 6 node models from yaml + --- + - Node: Node-A + Checks: + - Status: critical + - Node: Node-B + Checks: + - Status: passing + - Node: Node-C + Checks: + - Status: warning + - Node: Node-D + Checks: + - Status: critical + - Node: Node-E + Checks: + - Status: critical + - Node: Node-F + Checks: + - Status: warning + --- + When I visit the nodes page for yaml + --- + dc: dc-1 + --- + When I click selected on the sort + When I click options.3.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-B + - Node-C + - Node-F + - Node-A + - Node-D + - Node-E + --- + When I click selected on the sort + When I click options.2.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-A + - 
Node-D + - Node-E + - Node-C + - Node-F + - Node-B + --- + When I click selected on the sort + When I click options.0.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-A + - Node-B + - Node-C + - Node-D + - Node-E + - Node-F + --- + When I click selected on the sort + When I click options.1.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-F + - Node-E + - Node-D + - Node-C + - Node-B + - Node-A + --- diff --git a/ui-v2/tests/acceptance/page-navigation.feature b/ui-v2/tests/acceptance/page-navigation.feature index 6331a044e..b1e98fcf3 100644 --- a/ui-v2/tests/acceptance/page-navigation.feature +++ b/ui-v2/tests/acceptance/page-navigation.feature @@ -43,7 +43,6 @@ Feature: page-navigation ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | Item | Model | URL | Endpoint | Back | | service | services | /dc-1/services/service-0/instances | /v1/discovery-chain/service-0?dc=dc-1&ns=@namespace | /dc-1/services | - | node | nodes | /dc-1/nodes/node-0/health-checks | /v1/session/node/node-0?dc=dc-1&ns=@namespace | /dc-1/nodes | | kv | kvs | /dc-1/kv/0-key-value/edit | /v1/session/info/ee52203d-989f-4f7a-ab5a-2bef004164ca?dc=dc-1&ns=@namespace | /dc-1/kv | # | acl | acls | /dc-1/acls/anonymous | /v1/acl/info/anonymous?dc=dc-1 | /dc-1/acls | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/ui-v2/tests/acceptance/steps/dc/nodes/navigation-steps.js b/ui-v2/tests/acceptance/steps/dc/nodes/navigation-steps.js new file mode 100644 index 000000000..ba1093295 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/nodes/navigation-steps.js @@ -0,0 +1,10 @@ +import steps from '../../steps'; + +// step definitions that are shared between features 
should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/acceptance/steps/dc/nodes/sorting-steps.js b/ui-v2/tests/acceptance/steps/dc/nodes/sorting-steps.js new file mode 100644 index 000000000..ba1093295 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/nodes/sorting-steps.js @@ -0,0 +1,10 @@ +import steps from '../../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/acceptance/steps/nodes/sorting-steps.js b/ui-v2/tests/acceptance/steps/nodes/sorting-steps.js new file mode 100644 index 000000000..3c9a76f69 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/nodes/sorting-steps.js @@ -0,0 +1,10 @@ +import steps from '../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/integration/components/consul-node-list-test.js b/ui-v2/tests/integration/components/consul-node-list-test.js new file mode 100644 index 000000000..6ad53b10d --- /dev/null +++ b/ui-v2/tests/integration/components/consul-node-list-test.js @@ -0,0 +1,26 @@ +import { module, skip } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | consul-node-list', function(hooks) { + setupRenderingTest(hooks); + + skip('it renders', async function(assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle 
any actions with this.set('myAction', function(val) { ... }); + + await render(hbs``); + + assert.equal(this.element.textContent.trim(), ''); + + // Template block usage: + await render(hbs` + + template block text + + `); + + assert.equal(this.element.textContent.trim(), 'template block text'); + }); +}); diff --git a/ui-v2/tests/pages.js b/ui-v2/tests/pages.js index 24ae13a0a..2735a3a47 100644 --- a/ui-v2/tests/pages.js +++ b/ui-v2/tests/pages.js @@ -79,9 +79,6 @@ const tokenList = tokenListFactory(clickable, attribute, collection, deletable); const authForm = authFormFactory(submitable, clickable, attribute); const freetextFilter = freetextFilterFactory(triggerable); const catalogToolbar = searchBarFactory(freetextFilter); -const catalogFilter = searchBarFactory(freetextFilter, () => - radiogroup('status', ['', 'passing', 'warning', 'critical']) -); const aclFilter = searchBarFactory(freetextFilter, () => radiogroup('type', ['', 'management', 'client']) ); @@ -153,7 +150,7 @@ export default { service(visitable, attribute, collection, text, consulIntentionList, catalogToolbar, tabgroup) ), instance: create(instance(visitable, attribute, collection, text, tabgroup)), - nodes: create(nodes(visitable, clickable, attribute, collection, catalogFilter)), + nodes: create(nodes(visitable, text, clickable, attribute, collection, popoverSelect)), node: create(node(visitable, deletable, clickable, attribute, collection, tabgroup, text)), kvs: create(kvs(visitable, creatable, consulKvList)), kv: create(kv(visitable, attribute, submitable, deletable, cancelable, clickable)), diff --git a/ui-v2/tests/pages/dc/nodes/index.js b/ui-v2/tests/pages/dc/nodes/index.js index 335c87afe..c26bbd415 100644 --- a/ui-v2/tests/pages/dc/nodes/index.js +++ b/ui-v2/tests/pages/dc/nodes/index.js @@ -1,14 +1,13 @@ -export default function(visitable, clickable, attribute, collection, filter) { +export default function(visitable, text, clickable, attribute, collection, popoverSelect) { const node = 
{ - name: attribute('data-test-node'), + name: text('[data-test-node]'), leader: attribute('data-test-leader', '[data-test-leader]'), - node: clickable('header a'), + node: clickable('a'), }; return { visit: visitable('/:dc/nodes'), - nodes: collection('[data-test-node]', node), - healthyNodes: collection('.healthy [data-test-node]', node), - unHealthyNodes: collection('.unhealthy [data-test-node]', node), - filter: filter('[data-test-catalog-filter]'), + nodes: collection('.consul-node-list [data-test-list-row]', node), + home: clickable('[data-test-home]'), + sort: popoverSelect(), }; } From 2a52f89b8dd3e6bb96d39283dc3e631218d004fd Mon Sep 17 00:00:00 2001 From: Pierre Souchay Date: Fri, 28 Aug 2020 23:01:04 +0200 Subject: [PATCH 10/73] Added changelog for #8552 --- .changelog/8552.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/8552.txt diff --git a/.changelog/8552.txt b/.changelog/8552.txt new file mode 100644 index 000000000..aa4dc69aa --- /dev/null +++ b/.changelog/8552.txt @@ -0,0 +1,3 @@ +```release-note:feature +cache: Config parameters for cache throttling are now reloaded automatically on agent reload. Restarting the agent is not needed anymore. +``` From 335c604ced2653a8c891662e1b6125a8385665a2 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 31 Aug 2020 13:12:17 -0400 Subject: [PATCH 11/73] Merge of auto-config and auto-encrypt code (#8523) auto-encrypt is now handled as a special case of auto-config. This also is moving all the cert-monitor code into the auto-config package. 
--- agent/agent.go | 69 +- agent/auto-config/auto_config.go | 539 +++----- agent/auto-config/auto_config_test.go | 1307 +++++++++++++------- agent/auto-config/auto_encrypt.go | 111 ++ agent/auto-config/auto_encrypt_test.go | 562 +++++++++ agent/auto-config/config.go | 72 +- agent/auto-config/config_translate.go | 20 +- agent/auto-config/config_translate_test.go | 41 + agent/auto-config/mock_test.go | 337 +++++ agent/auto-config/persist.go | 86 ++ agent/auto-config/run.go | 192 +++ agent/auto-config/server_addr.go | 111 ++ agent/auto-config/tls.go | 280 +++++ agent/auto-config/tls_test.go | 56 + agent/cert-monitor/cert_monitor.go | 505 -------- agent/cert-monitor/cert_monitor_test.go | 731 ----------- agent/cert-monitor/config.go | 150 --- agent/consul/auto_encrypt.go | 239 ---- agent/consul/auto_encrypt_test.go | 205 --- agent/setup.go | 52 +- proto/translate.go | 14 +- proto/translate_test.go | 25 +- 22 files changed, 2934 insertions(+), 2770 deletions(-) create mode 100644 agent/auto-config/auto_encrypt.go create mode 100644 agent/auto-config/auto_encrypt_test.go create mode 100644 agent/auto-config/mock_test.go create mode 100644 agent/auto-config/persist.go create mode 100644 agent/auto-config/run.go create mode 100644 agent/auto-config/server_addr.go create mode 100644 agent/auto-config/tls.go create mode 100644 agent/auto-config/tls_test.go delete mode 100644 agent/cert-monitor/cert_monitor.go delete mode 100644 agent/cert-monitor/cert_monitor_test.go delete mode 100644 agent/cert-monitor/config.go delete mode 100644 agent/consul/auto_encrypt.go delete mode 100644 agent/consul/auto_encrypt_test.go diff --git a/agent/agent.go b/agent/agent.go index f6c7f3418..0c639da9a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -31,7 +31,6 @@ import ( autoconf "github.com/hashicorp/consul/agent/auto-config" "github.com/hashicorp/consul/agent/cache" cachetype "github.com/hashicorp/consul/agent/cache-types" - certmon "github.com/hashicorp/consul/agent/cert-monitor" 
"github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" @@ -162,8 +161,6 @@ type notifier interface { type Agent struct { autoConf *autoconf.AutoConfig - certMonitor *certmon.CertMonitor - // config is the agent configuration. config *config.RuntimeConfig @@ -373,6 +370,11 @@ func New(bd BaseDeps) (*Agent, error) { // pass the agent itself so its safe to move here. a.registerCache() + // TODO: move to newBaseDeps + // TODO: handle error + a.loadTokens(a.config) + a.loadEnterpriseTokens(a.config) + return &a, nil } @@ -426,11 +428,6 @@ func (a *Agent) Start(ctx context.Context) error { return fmt.Errorf("Failed to load TLS configurations after applying auto-config settings: %w", err) } - // TODO: move to newBaseDeps - // TODO: handle error - a.loadTokens(a.config) - a.loadEnterpriseTokens(a.config) - // create the local state a.State = local.NewState(LocalConfig(c), a.logger, a.tokens) @@ -495,43 +492,6 @@ func (a *Agent) Start(ctx context.Context) error { a.State.Delegate = a.delegate a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger - if a.config.AutoEncryptTLS && !a.config.ServerMode { - reply, err := a.autoEncryptInitialCertificate(ctx) - if err != nil { - return fmt.Errorf("AutoEncrypt failed: %s", err) - } - - cmConfig := new(certmon.Config). - WithCache(a.cache). - WithLogger(a.logger.Named(logging.AutoEncrypt)). - WithTLSConfigurator(a.tlsConfigurator). - WithTokens(a.tokens). - WithFallback(a.autoEncryptInitialCertificate). - WithDNSSANs(a.config.AutoEncryptDNSSAN). - WithIPSANs(a.config.AutoEncryptIPSAN). - WithDatacenter(a.config.Datacenter). 
- WithNodeName(a.config.NodeName) - - monitor, err := certmon.New(cmConfig) - if err != nil { - return fmt.Errorf("AutoEncrypt failed to setup certificate monitor: %w", err) - } - if err := monitor.Update(reply); err != nil { - return fmt.Errorf("AutoEncrypt failed to setup certificate monitor: %w", err) - } - a.certMonitor = monitor - - // we don't need to worry about ever calling Stop as we have tied the go routines - // to the agents lifetime by using the StopCh. Also the agent itself doesn't have - // a need of ensuring that the go routine was stopped before performing any action - // so we can ignore the chan in the return. - if _, err := a.certMonitor.Start(&lib.StopChannelContext{StopCh: a.shutdownCh}); err != nil { - return fmt.Errorf("AutoEncrypt failed to start certificate monitor: %w", err) - } - - a.logger.Info("automatically upgraded to TLS") - } - if err := a.autoConf.Start(&lib.StopChannelContext{StopCh: a.shutdownCh}); err != nil { return fmt.Errorf("AutoConf failed to start certificate monitor: %w", err) } @@ -645,19 +605,6 @@ func (a *Agent) Start(ctx context.Context) error { return nil } -func (a *Agent) autoEncryptInitialCertificate(ctx context.Context) (*structs.SignedResponse, error) { - client := a.delegate.(*consul.Client) - - addrs := a.config.StartJoinAddrsLAN - disco, err := newDiscover() - if err != nil && len(addrs) == 0 { - return nil, err - } - addrs = append(addrs, retryJoinAddrs(disco, retryJoinSerfVariant, "LAN", a.config.RetryJoinLAN, a.logger)...) 
- - return client.RequestAutoEncryptCerts(ctx, addrs, a.config.ServerPort, a.tokens.AgentToken(), a.config.AutoEncryptDNSSAN, a.config.AutoEncryptIPSAN) -} - func (a *Agent) listenAndServeGRPC() error { if len(a.config.GRPCAddrs) < 1 { return nil @@ -1380,12 +1327,6 @@ func (a *Agent) ShutdownAgent() error { // this should help them to be stopped more quickly a.autoConf.Stop() - if a.certMonitor != nil { - // this would be cancelled anyways (by the closing of the shutdown ch) - // but this should help them to be stopped more quickly - a.certMonitor.Stop() - } - // Stop the service manager (must happen before we take the stateLock to avoid deadlock) if a.serviceManager != nil { a.serviceManager.Stop() diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index 939879a76..c2dd942c6 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -4,62 +4,54 @@ import ( "context" "fmt" "io/ioutil" - "net" - "os" - "path/filepath" - "strconv" - "strings" + "sync" "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/proto/pbautoconf" - "github.com/hashicorp/go-discover" - discoverk8s "github.com/hashicorp/go-discover/provider/k8s" "github.com/hashicorp/go-hclog" - - "github.com/golang/protobuf/jsonpb" -) - -const ( - // autoConfigFileName is the name of the file that the agent auto-config settings are - // stored in within the data directory - autoConfigFileName = "auto-config.json" - - dummyTrustDomain = "dummytrustdomain" -) - -var ( - pbMarshaler = &jsonpb.Marshaler{ - OrigName: false, - EnumsAsInts: false, - Indent: " ", - EmitDefaults: true, - } - - pbUnmarshaler = &jsonpb.Unmarshaler{ - AllowUnknownFields: false, - } ) // AutoConfig is all 
the state necessary for being able to parse a configuration // as well as perform the necessary RPCs to perform Agent Auto Configuration. -// -// NOTE: This struct and methods on it are not currently thread/goroutine safe. -// However it doesn't spawn any of its own go routines yet and is used in a -// synchronous fashion. In the future if either of those two conditions change -// then we will need to add some locking here. I am deferring that for now -// to help ease the review of this already large PR. type AutoConfig struct { + sync.Mutex + acConfig Config logger hclog.Logger - certMonitor CertMonitor + cache Cache + waiter *lib.RetryWaiter config *config.RuntimeConfig autoConfigResponse *pbautoconf.AutoConfigResponse autoConfigSource config.Source + + running bool + done chan struct{} + // cancel is used to cancel the entire AutoConfig + // go routine. This is the main field protected + // by the mutex as it being non-nil indicates that + // the go routine has been started and is stoppable. + // note that it doesn't indcate that the go routine + // is currently running. + cancel context.CancelFunc + + // cancelWatches is used to cancel the existing + // cache watches regarding the agents certificate. This is + // mainly only necessary when the Agent token changes. + cancelWatches context.CancelFunc + + // cacheUpdates is the chan used to have the cache + // send us back events + cacheUpdates chan cache.UpdateEvent + + // tokenUpdates is the struct used to receive + // events from the token store when the Agent + // token is updated. + tokenUpdates token.Notifier } // New creates a new AutoConfig object for providing automatic Consul configuration. 
@@ -69,6 +61,19 @@ func New(config Config) (*AutoConfig, error) { return nil, fmt.Errorf("must provide a config loader") case config.DirectRPC == nil: return nil, fmt.Errorf("must provide a direct RPC delegate") + case config.Cache == nil: + return nil, fmt.Errorf("must provide a cache") + case config.TLSConfigurator == nil: + return nil, fmt.Errorf("must provide a TLS configurator") + case config.Tokens == nil: + return nil, fmt.Errorf("must provide a token store") + } + + if config.FallbackLeeway == 0 { + config.FallbackLeeway = 10 * time.Second + } + if config.FallbackRetry == 0 { + config.FallbackRetry = time.Minute } logger := config.Logger @@ -83,15 +88,16 @@ func New(config Config) (*AutoConfig, error) { } return &AutoConfig{ - acConfig: config, - logger: logger, - certMonitor: config.CertMonitor, + acConfig: config, + logger: logger, }, nil } // ReadConfig will parse the current configuration and inject any // auto-config sources if present into the correct place in the parsing chain. func (ac *AutoConfig) ReadConfig() (*config.RuntimeConfig, error) { + ac.Lock() + defer ac.Unlock() cfg, warnings, err := ac.acConfig.Loader(ac.autoConfigSource) if err != nil { return cfg, err @@ -105,46 +111,6 @@ func (ac *AutoConfig) ReadConfig() (*config.RuntimeConfig, error) { return cfg, nil } -// restorePersistedAutoConfig will attempt to load the persisted auto-config -// settings from the data directory. It returns true either when there was an -// unrecoverable error or when the configuration was successfully loaded from -// disk. Recoverable errors, such as "file not found" are suppressed and this -// method will return false for the first boolean. 
-func (ac *AutoConfig) restorePersistedAutoConfig() (bool, error) { - if ac.config.DataDir == "" { - // no data directory means we don't have anything to potentially load - return false, nil - } - - path := filepath.Join(ac.config.DataDir, autoConfigFileName) - ac.logger.Debug("attempting to restore any persisted configuration", "path", path) - - content, err := ioutil.ReadFile(path) - if err == nil { - rdr := strings.NewReader(string(content)) - - var resp pbautoconf.AutoConfigResponse - if err := pbUnmarshaler.Unmarshal(rdr, &resp); err != nil { - return false, fmt.Errorf("failed to decode persisted auto-config data: %w", err) - } - - if err := ac.update(&resp); err != nil { - return false, fmt.Errorf("error restoring persisted auto-config response: %w", err) - } - - ac.logger.Info("restored persisted configuration", "path", path) - return true, nil - } - - if !os.IsNotExist(err) { - return true, fmt.Errorf("failed to load %s: %w", path, err) - } - - // ignore non-existence errors as that is an indicator that we haven't - // performed the auto configuration before - return false, nil -} - // InitialConfiguration will perform a one-time RPC request to the configured servers // to retrieve various cluster wide configurations. See the proto/pbautoconf/auto_config.proto // file for a complete reference of what configurations can be applied in this manner. 
@@ -164,30 +130,49 @@ func (ac *AutoConfig) InitialConfiguration(ctx context.Context) (*config.Runtime ac.config = config } - if !ac.config.AutoConfig.Enabled { - return ac.config, nil - } - - ready, err := ac.restorePersistedAutoConfig() - if err != nil { - return nil, err - } - - if !ready { - ac.logger.Info("retrieving initial agent auto configuration remotely") - if err := ac.getInitialConfiguration(ctx); err != nil { + switch { + case ac.config.AutoConfig.Enabled: + resp, err := ac.readPersistedAutoConfig() + if err != nil { return nil, err } - } - // re-read the configuration now that we have our initial auto-config - config, err := ac.ReadConfig() - if err != nil { - return nil, err - } + if resp == nil { + ac.logger.Info("retrieving initial agent auto configuration remotely") + resp, err = ac.getInitialConfiguration(ctx) + if err != nil { + return nil, err + } + } - ac.config = config - return ac.config, nil + ac.logger.Debug("updating auto-config settings") + if err = ac.recordInitialConfiguration(resp); err != nil { + return nil, err + } + + // re-read the configuration now that we have our initial auto-config + config, err := ac.ReadConfig() + if err != nil { + return nil, err + } + + ac.config = config + return ac.config, nil + case ac.config.AutoEncryptTLS: + certs, err := ac.autoEncryptInitialCerts(ctx) + if err != nil { + return nil, err + } + + if err := ac.setInitialTLSCertificates(certs); err != nil { + return nil, err + } + + ac.logger.Info("automatically upgraded to TLS") + return ac.config, nil + default: + return ac.config, nil + } } // introToken is responsible for determining the correct intro token to use @@ -217,118 +202,45 @@ func (ac *AutoConfig) introToken() (string, error) { return token, nil } -// serverHosts is responsible for taking the list of server addresses and -// resolving any go-discover provider invocations. It will then return a list -// of hosts. 
These might be hostnames and is expected that DNS resolution may -// be performed after this function runs. Additionally these may contain ports -// so SplitHostPort could also be necessary. -func (ac *AutoConfig) serverHosts() ([]string, error) { - servers := ac.config.AutoConfig.ServerAddresses +// recordInitialConfiguration is responsible for recording the AutoConfigResponse from +// the AutoConfig.InitialConfiguration RPC. It is an all-in-one function to do the following +// * update the Agent token in the token store +func (ac *AutoConfig) recordInitialConfiguration(resp *pbautoconf.AutoConfigResponse) error { + ac.autoConfigResponse = resp - providers := make(map[string]discover.Provider) - for k, v := range discover.Providers { - providers[k] = v + ac.autoConfigSource = config.LiteralSource{ + Name: autoConfigFileName, + Config: translateConfig(resp.Config), } - providers["k8s"] = &discoverk8s.Provider{} - - disco, err := discover.New( - discover.WithUserAgent(lib.UserAgent()), - discover.WithProviders(providers), - ) + // we need to re-read the configuration to determine what the correct ACL + // token to push into the token store is. Any user provided token will override + // any AutoConfig generated token. + config, err := ac.ReadConfig() if err != nil { - return nil, fmt.Errorf("Failed to create go-discover resolver: %w", err) + return fmt.Errorf("failed to fully resolve configuration: %w", err) } - var addrs []string - for _, addr := range servers { - switch { - case strings.Contains(addr, "provider="): - resolved, err := disco.Addrs(addr, ac.logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})) - if err != nil { - ac.logger.Error("failed to resolve go-discover auto-config servers", "configuration", addr, "err", err) - continue - } + // ignoring the return value which would indicate a change in the token + _ = ac.acConfig.Tokens.UpdateAgentToken(config.ACLAgentToken, token.TokenSourceConfig) - addrs = append(addrs, resolved...) 
- ac.logger.Debug("discovered auto-config servers", "servers", resolved) - default: - addrs = append(addrs, addr) - } - } - - if len(addrs) == 0 { - return nil, fmt.Errorf("no auto-config server addresses available for use") - } - - return addrs, nil -} - -// resolveHost will take a single host string and convert it to a list of TCPAddrs -// This will process any port in the input as well as looking up the hostname using -// normal DNS resolution. -func (ac *AutoConfig) resolveHost(hostPort string) []net.TCPAddr { - port := ac.config.ServerPort - host, portStr, err := net.SplitHostPort(hostPort) + // extra a structs.SignedResponse from the AutoConfigResponse for use in cache prepopulation + signed, err := extractSignedResponse(resp) if err != nil { - if strings.Contains(err.Error(), "missing port in address") { - host = hostPort - } else { - ac.logger.Warn("error splitting host address into IP and port", "address", hostPort, "error", err) - return nil - } - } else { - port, err = strconv.Atoi(portStr) - if err != nil { - ac.logger.Warn("Parsed port is not an integer", "port", portStr, "error", err) - return nil - } + return fmt.Errorf("failed to extract certificates from the auto-config response: %w", err) } - // resolve the host to a list of IPs - ips, err := net.LookupIP(host) - if err != nil { - ac.logger.Warn("IP resolution failed", "host", host, "error", err) - return nil + // prepopulate the cache + if err = ac.populateCertificateCache(signed); err != nil { + return fmt.Errorf("failed to populate the cache with certificate responses: %w", err) } - var addrs []net.TCPAddr - for _, ip := range ips { - addrs = append(addrs, net.TCPAddr{IP: ip, Port: port}) - } - - return addrs -} - -// recordResponse takes an AutoConfig RPC response records it with the agent -// This will persist the configuration to disk (unless in dev mode running without -// a data dir) and will reload the configuration. 
-func (ac *AutoConfig) recordResponse(resp *pbautoconf.AutoConfigResponse) error { - serialized, err := pbMarshaler.MarshalToString(resp) - if err != nil { - return fmt.Errorf("failed to encode auto-config response as JSON: %w", err) - } - - if err := ac.update(resp); err != nil { + // update the TLS configurator with the latest certificates + if err := ac.updateTLSFromResponse(resp); err != nil { return err } - // now that we know the configuration is generally fine including TLS certs go ahead and persist it to disk. - if ac.config.DataDir == "" { - ac.logger.Debug("not persisting auto-config settings because there is no data directory") - return nil - } - - path := filepath.Join(ac.config.DataDir, autoConfigFileName) - - err = ioutil.WriteFile(path, []byte(serialized), 0660) - if err != nil { - return fmt.Errorf("failed to write auto-config configurations: %w", err) - } - - ac.logger.Debug("auto-config settings were persisted to disk") - - return nil + return ac.persistAutoConfig(resp) } // getInitialConfigurationOnce will perform full server to TCPAddr resolution and @@ -352,7 +264,7 @@ func (ac *AutoConfig) getInitialConfigurationOnce(ctx context.Context, csr strin var resp pbautoconf.AutoConfigResponse - servers, err := ac.serverHosts() + servers, err := ac.autoConfigHosts() if err != nil { return nil, err } @@ -369,6 +281,7 @@ func (ac *AutoConfig) getInitialConfigurationOnce(ctx context.Context, csr strin ac.logger.Error("AutoConfig.InitialConfiguration RPC failed", "addr", addr.String(), "error", err) continue } + ac.logger.Debug("AutoConfig.InitialConfiguration RPC was successful") // update the Certificate with the private key we generated locally if resp.Certificate != nil { @@ -379,17 +292,17 @@ func (ac *AutoConfig) getInitialConfigurationOnce(ctx context.Context, csr strin } } - return nil, ctx.Err() + return nil, fmt.Errorf("No server successfully responded to the auto-config request") } // getInitialConfiguration implements a loop to retry calls to 
getInitialConfigurationOnce. // It uses the RetryWaiter on the AutoConfig object to control how often to attempt // the initial configuration process. It is also canceallable by cancelling the provided context. -func (ac *AutoConfig) getInitialConfiguration(ctx context.Context) error { +func (ac *AutoConfig) getInitialConfiguration(ctx context.Context) (*pbautoconf.AutoConfigResponse, error) { // generate a CSR csr, key, err := ac.generateCSR() if err != nil { - return err + return nil, err } // this resets the failures so that we will perform immediate request @@ -397,183 +310,95 @@ func (ac *AutoConfig) getInitialConfiguration(ctx context.Context) error { for { select { case <-wait: - resp, err := ac.getInitialConfigurationOnce(ctx, csr, key) - if resp != nil { - return ac.recordResponse(resp) + if resp, err := ac.getInitialConfigurationOnce(ctx, csr, key); err == nil && resp != nil { + return resp, nil } else if err != nil { ac.logger.Error(err.Error()) } else { - ac.logger.Error("No error returned when fetching the initial auto-configuration but no response was either") + ac.logger.Error("No error returned when fetching configuration from the servers but no response was either") } + wait = ac.acConfig.Waiter.Failed() case <-ctx.Done(): ac.logger.Info("interrupted during initial auto configuration", "err", ctx.Err()) - return ctx.Err() + return nil, ctx.Err() } } } -// generateCSR will generate a CSR for an Agent certificate. This should -// be sent along with the AutoConfig.InitialConfiguration RPC. The generated -// CSR does NOT have a real trust domain as when generating this we do -// not yet have the CA roots. The server will update the trust domain -// for us though. -func (ac *AutoConfig) generateCSR() (csr string, key string, err error) { - // We don't provide the correct host here, because we don't know any - // better at this point. Apart from the domain, we would need the - // ClusterID, which we don't have. 
This is why we go with - // dummyTrustDomain the first time. Subsequent CSRs will have the - // correct TrustDomain. - id := &connect.SpiffeIDAgent{ - // will be replaced - Host: dummyTrustDomain, - Datacenter: ac.config.Datacenter, - Agent: ac.config.NodeName, - } - - caConfig, err := ac.config.ConnectCAConfiguration() - if err != nil { - return "", "", fmt.Errorf("Cannot generate CSR: %w", err) - } - - conf, err := caConfig.GetCommonConfig() - if err != nil { - return "", "", fmt.Errorf("Failed to load common CA configuration: %w", err) - } - - if conf.PrivateKeyType == "" { - conf.PrivateKeyType = connect.DefaultPrivateKeyType - } - if conf.PrivateKeyBits == 0 { - conf.PrivateKeyBits = connect.DefaultPrivateKeyBits - } - - // Create a new private key - pk, pkPEM, err := connect.GeneratePrivateKeyWithConfig(conf.PrivateKeyType, conf.PrivateKeyBits) - if err != nil { - return "", "", fmt.Errorf("Failed to generate private key: %w", err) - } - - dnsNames := append([]string{"localhost"}, ac.config.AutoConfig.DNSSANs...) - ipAddresses := append([]net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::")}, ac.config.AutoConfig.IPSANs...) - - // Create a CSR. - // - // The Common Name includes the dummy trust domain for now but Server will - // override this when it is signed anyway so it's OK. - cn := connect.AgentCN(ac.config.NodeName, dummyTrustDomain) - csr, err = connect.CreateCSR(id, cn, pk, dnsNames, ipAddresses) - if err != nil { - return "", "", err - } - - return csr, pkPEM, nil -} - -// update will take an AutoConfigResponse and do all things necessary -// to restore those settings. This currently involves updating the -// config data to be used during a call to ReadConfig, updating the -// tls Configurator and prepopulating the cache. 
-func (ac *AutoConfig) update(resp *pbautoconf.AutoConfigResponse) error { - ac.autoConfigResponse = resp - - ac.autoConfigSource = config.LiteralSource{ - Name: autoConfigFileName, - Config: translateConfig(resp.Config), - } - - if err := ac.updateTLSFromResponse(resp); err != nil { - return err - } - - return nil -} - -// updateTLSFromResponse will update the TLS certificate and roots in the shared -// TLS configurator. -func (ac *AutoConfig) updateTLSFromResponse(resp *pbautoconf.AutoConfigResponse) error { - if ac.certMonitor == nil { - return nil - } - - roots, err := translateCARootsToStructs(resp.CARoots) - if err != nil { - return err - } - - cert, err := translateIssuedCertToStructs(resp.Certificate) - if err != nil { - return err - } - - update := &structs.SignedResponse{ - IssuedCert: *cert, - ConnectCARoots: *roots, - ManualCARoots: resp.ExtraCACertificates, - } - - if resp.Config != nil && resp.Config.TLS != nil { - update.VerifyServerHostname = resp.Config.TLS.VerifyServerHostname - } - - if err := ac.certMonitor.Update(update); err != nil { - return fmt.Errorf("failed to update the certificate monitor: %w", err) - } - - return nil -} - func (ac *AutoConfig) Start(ctx context.Context) error { - if ac.certMonitor == nil { + ac.Lock() + defer ac.Unlock() + + if !ac.config.AutoConfig.Enabled && !ac.config.AutoEncryptTLS { return nil } - if !ac.config.AutoConfig.Enabled { - return nil + if ac.running || ac.cancel != nil { + return fmt.Errorf("AutoConfig is already running") } - _, err := ac.certMonitor.Start(ctx) - return err + // create the top level context to control the go + // routine executing the `run` method + ctx, cancel := context.WithCancel(ctx) + + // create the channel to get cache update events through + // really we should only ever get 10 updates + ac.cacheUpdates = make(chan cache.UpdateEvent, 10) + + // setup the cache watches + cancelCertWatches, err := ac.setupCertificateCacheWatches(ctx) + if err != nil { + cancel() + return 
fmt.Errorf("error setting up cache watches: %w", err) + } + + // start the token update notifier + ac.tokenUpdates = ac.acConfig.Tokens.Notify(token.TokenKindAgent) + + // store the cancel funcs + ac.cancel = cancel + ac.cancelWatches = cancelCertWatches + + ac.running = true + ac.done = make(chan struct{}) + go ac.run(ctx, ac.done) + + ac.logger.Info("auto-config started") + return nil +} + +func (ac *AutoConfig) Done() <-chan struct{} { + ac.Lock() + defer ac.Unlock() + + if ac.done != nil { + return ac.done + } + + // return a closed channel to indicate that we are already done + done := make(chan struct{}) + close(done) + return done +} + +func (ac *AutoConfig) IsRunning() bool { + ac.Lock() + defer ac.Unlock() + return ac.running } func (ac *AutoConfig) Stop() bool { - if ac.certMonitor == nil { + ac.Lock() + defer ac.Unlock() + + if !ac.running { return false } - if !ac.config.AutoConfig.Enabled { - return false + if ac.cancel != nil { + ac.cancel() } - return ac.certMonitor.Stop() -} - -func (ac *AutoConfig) FallbackTLS(ctx context.Context) (*structs.SignedResponse, error) { - // generate a CSR - csr, key, err := ac.generateCSR() - if err != nil { - return nil, err - } - - resp, err := ac.getInitialConfigurationOnce(ctx, csr, key) - if err != nil { - return nil, err - } - - return extractSignedResponse(resp) -} - -func (ac *AutoConfig) RecordUpdatedCerts(resp *structs.SignedResponse) error { - var err error - ac.autoConfigResponse.ExtraCACertificates = resp.ManualCARoots - ac.autoConfigResponse.CARoots, err = translateCARootsToProtobuf(&resp.ConnectCARoots) - if err != nil { - return err - } - ac.autoConfigResponse.Certificate, err = translateIssuedCertToProtobuf(&resp.IssuedCert) - if err != nil { - return err - } - - return ac.recordResponse(ac.autoConfigResponse) + return true } diff --git a/agent/auto-config/auto_config_test.go b/agent/auto-config/auto_config_test.go index a421a45b7..e3469862a 100644 --- a/agent/auto-config/auto_config_test.go +++ 
b/agent/auto-config/auto_config_test.go @@ -5,115 +5,146 @@ import ( "fmt" "io/ioutil" "net" + "os" "path/filepath" - "strings" + "sync" "testing" "time" - "github.com/gogo/protobuf/types" + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/pbautoconf" "github.com/hashicorp/consul/proto/pbconfig" - "github.com/hashicorp/consul/proto/pbconnect" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -type mockDirectRPC struct { - mock.Mock +type configLoader struct { + opts config.BuilderOpts } -func (m *mockDirectRPC) RPC(dc string, node string, addr net.Addr, method string, args interface{}, reply interface{}) error { - var retValues mock.Arguments - if method == "AutoConfig.InitialConfiguration" { - req := args.(*pbautoconf.AutoConfigRequest) - csr := req.CSR - req.CSR = "" - retValues = m.Called(dc, node, addr, method, args, reply) - req.CSR = csr - } else { - retValues = m.Called(dc, node, addr, method, args, reply) - } +func (c *configLoader) Load(source config.Source) (*config.RuntimeConfig, []string, error) { + return config.Load(c.opts, source) +} - switch ret := retValues.Get(0).(type) { - case error: - return ret - case func(interface{}): - ret(reply) - return nil +func (c *configLoader) addConfigHCL(cfg string) { + c.opts.HCL = append(c.opts.HCL, cfg) +} + +func requireChanNotReady(t *testing.T, ch <-chan struct{}) { + select { + case <-ch: + require.Fail(t, "chan is ready when it shouldn't be") default: - return fmt.Errorf("This should not happen, update mock direct rpc expectations") + return } } -type 
mockCertMonitor struct { - mock.Mock -} - -func (m *mockCertMonitor) Start(_ context.Context) (<-chan struct{}, error) { - ret := m.Called() - ch := ret.Get(0).(<-chan struct{}) - return ch, ret.Error(1) -} - -func (m *mockCertMonitor) Stop() bool { - return m.Called().Bool(0) -} - -func (m *mockCertMonitor) Update(resp *structs.SignedResponse) error { - var privKey string - // filter out real certificates as we cannot predict their values - if resp != nil && strings.HasPrefix(resp.IssuedCert.PrivateKeyPEM, "-----BEGIN") { - privKey = resp.IssuedCert.PrivateKeyPEM - resp.IssuedCert.PrivateKeyPEM = "" +func requireChanReady(t *testing.T, ch <-chan struct{}) { + select { + case <-ch: + return + default: + require.Fail(t, "chan is not ready when it should be") } - err := m.Called(resp).Error(0) - if privKey != "" { - resp.IssuedCert.PrivateKeyPEM = privKey +} + +func waitForChan(timer *time.Timer, ch <-chan struct{}) bool { + select { + case <-timer.C: + return false + case <-ch: + return true } - return err +} + +func waitForChans(timeout time.Duration, chans ...<-chan struct{}) bool { + timer := time.NewTimer(timeout) + defer timer.Stop() + + for _, ch := range chans { + if !waitForChan(timer, ch) { + return false + } + } + return true } func TestNew(t *testing.T) { type testCase struct { - config Config + modify func(*Config) err string validate func(t *testing.T, ac *AutoConfig) } cases := map[string]testCase{ "no-direct-rpc": { - config: Config{ - Loader: func(source config.Source) (cfg *config.RuntimeConfig, warnings []string, err error) { - return nil, nil, nil - }, + modify: func(c *Config) { + c.DirectRPC = nil }, err: "must provide a direct RPC delegate", }, - "no-config-loader": { + modify: func(c *Config) { + c.Loader = nil + }, err: "must provide a config loader", }, - "ok": { - config: Config{ - DirectRPC: &mockDirectRPC{}, - Loader: func(source config.Source) (cfg *config.RuntimeConfig, warnings []string, err error) { - return nil, nil, nil - }, + 
"no-cache": { + modify: func(c *Config) { + c.Cache = nil }, + err: "must provide a cache", + }, + "no-tls-configurator": { + modify: func(c *Config) { + c.TLSConfigurator = nil + }, + err: "must provide a TLS configurator", + }, + "no-tokens": { + modify: func(c *Config) { + c.Tokens = nil + }, + err: "must provide a token store", + }, + "ok": { validate: func(t *testing.T, ac *AutoConfig) { t.Helper() require.NotNil(t, ac.logger) + require.NotNil(t, ac.acConfig.Waiter) + require.Equal(t, time.Minute, ac.acConfig.FallbackRetry) + require.Equal(t, 10*time.Second, ac.acConfig.FallbackLeeway) }, }, } for name, tcase := range cases { t.Run(name, func(t *testing.T) { - ac, err := New(tcase.config) + cfg := Config{ + Loader: func(source config.Source) (cfg *config.RuntimeConfig, warnings []string, err error) { + return nil, nil, nil + }, + DirectRPC: newMockDirectRPC(t), + Tokens: newMockTokenStore(t), + Cache: newMockCache(t), + TLSConfigurator: newMockTLSConfigurator(t), + ServerProvider: newMockServerProvider(t), + } + + if tcase.modify != nil { + tcase.modify(&cfg) + } + + ac, err := New(cfg) if tcase.err != "" { testutil.RequireErrorContains(t, err, tcase.err) } else { @@ -157,32 +188,34 @@ func TestReadConfig(t *testing.T) { require.Same(t, ac.config, cfg) } -func setupRuntimeConfig(t *testing.T) *config.RuntimeConfig { +func setupRuntimeConfig(t *testing.T) *configLoader { t.Helper() dataDir := testutil.TempDir(t, "auto-config") - rtConfig := &config.RuntimeConfig{ - DataDir: dataDir, - Datacenter: "dc1", - NodeName: "autoconf", - BindAddr: &net.IPAddr{IP: net.ParseIP("127.0.0.1")}, + + opts := config.BuilderOpts{ + Config: config.Config{ + DataDir: &dataDir, + Datacenter: stringPointer("dc1"), + NodeName: stringPointer("autoconf"), + BindAddr: stringPointer("127.0.0.1"), + }, } - return rtConfig + return &configLoader{opts: opts} } func TestInitialConfiguration_disabled(t *testing.T) { - rtConfig := setupRuntimeConfig(t) + loader := setupRuntimeConfig(t) + 
loader.addConfigHCL(` + primary_datacenter = "primary" + auto_config = { + enabled = false + } + `) + + conf := newMockedConfig(t).Config + conf.Loader = loader.Load - directRPC := new(mockDirectRPC) - directRPC.Test(t) - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - rtConfig.PrimaryDatacenter = "primary" - rtConfig.AutoConfig.Enabled = false - return rtConfig, nil, nil - }, - } ac, err := New(conf) require.NoError(t, err) require.NotNil(t, ac) @@ -191,38 +224,34 @@ func TestInitialConfiguration_disabled(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) require.Equal(t, "primary", cfg.PrimaryDatacenter) - require.NoFileExists(t, filepath.Join(rtConfig.DataDir, autoConfigFileName)) - - // ensure no RPC was made - directRPC.AssertExpectations(t) + require.NoFileExists(t, filepath.Join(*loader.opts.Config.DataDir, autoConfigFileName)) } func TestInitialConfiguration_cancelled(t *testing.T) { - rtConfig := setupRuntimeConfig(t) + mcfg := newMockedConfig(t) + + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + primary_datacenter = "primary" + auto_config = { + enabled = true + intro_token = "blarg" + server_addresses = ["127.0.0.1:8300"] + } + verify_outgoing = true + `) + mcfg.Config.Loader = loader.Load - directRPC := new(mockDirectRPC) - directRPC.Test(t) expectedRequest := pbautoconf.AutoConfigRequest{ Datacenter: "dc1", Node: "autoconf", JWT: "blarg", } - directRPC.On("RPC", "dc1", "autoconf", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}, "AutoConfig.InitialConfiguration", &expectedRequest, mock.Anything).Return(fmt.Errorf("injected error")).Times(0) - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - rtConfig.PrimaryDatacenter = "primary" - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: []string{"127.0.0.1:8300"}, - } - 
rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - } - ac, err := New(conf) + mcfg.directRPC.On("RPC", "dc1", "autoconf", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}, "AutoConfig.InitialConfiguration", &expectedRequest, mock.Anything).Return(fmt.Errorf("injected error")).Times(0) + mcfg.serverProvider.On("FindLANServer").Return(nil).Times(0) + + ac, err := New(mcfg.Config) require.NoError(t, err) require.NotNil(t, ac) @@ -232,110 +261,55 @@ func TestInitialConfiguration_cancelled(t *testing.T) { cfg, err := ac.InitialConfiguration(ctx) testutil.RequireErrorContains(t, err, context.DeadlineExceeded.Error()) require.Nil(t, cfg) - - // ensure no RPC was made - directRPC.AssertExpectations(t) } func TestInitialConfiguration_restored(t *testing.T) { - rtConfig := setupRuntimeConfig(t) + mcfg := newMockedConfig(t) + + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + auto_config = { + enabled = true + intro_token ="blarg" + server_addresses = ["127.0.0.1:8300"] + } + verify_outgoing = true + `) + + mcfg.Config.Loader = loader.Load + + indexedRoots, cert, extraCACerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") // persist an auto config response to the data dir where it is expected - persistedFile := filepath.Join(rtConfig.DataDir, autoConfigFileName) + persistedFile := filepath.Join(*loader.opts.Config.DataDir, autoConfigFileName) response := &pbautoconf.AutoConfigResponse{ Config: &pbconfig.Config{ PrimaryDatacenter: "primary", TLS: &pbconfig.TLS{ VerifyServerHostname: true, }, - }, - CARoots: &pbconnect.CARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*pbconnect.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: &types.Timestamp{Seconds: 5000, Nanos: 100}, - NotAfter: &types.Timestamp{Seconds: 10000, Nanos: 9009}, - RootCert: "not an actual cert", - Active: true, + ACL: &pbconfig.ACL{ + Tokens: &pbconfig.ACLTokens{ + Agent: "secret", }, }, }, - 
Certificate: &pbconnect.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - PrivateKeyPEM: "private", - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: &types.Timestamp{Seconds: 6000}, - ValidBefore: &types.Timestamp{Seconds: 7000}, - }, - ExtraCACertificates: []string{"blarg"}, + CARoots: mustTranslateCARootsToProtobuf(t, indexedRoots), + Certificate: mustTranslateIssuedCertToProtobuf(t, cert), + ExtraCACertificates: extraCACerts, } data, err := pbMarshaler.MarshalToString(response) require.NoError(t, err) require.NoError(t, ioutil.WriteFile(persistedFile, []byte(data), 0600)) - directRPC := new(mockDirectRPC) - directRPC.Test(t) + // recording the initial configuration even when restoring is going to update + // the agent token in the token store + mcfg.tokens.On("UpdateAgentToken", "secret", token.TokenSourceConfig).Return(true).Once() - // setup the mock certificate monitor to ensure that the initial state gets - // updated appropriately during config restoration. 
- certMon := new(mockCertMonitor) - certMon.Test(t) - certMon.On("Update", &structs.SignedResponse{ - IssuedCert: structs.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - PrivateKeyPEM: "private", - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: time.Unix(6000, 0), - ValidBefore: time.Unix(7000, 0), - }, - ConnectCARoots: structs.IndexedCARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*structs.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: time.Unix(5000, 100), - NotAfter: time.Unix(10000, 9009), - RootCert: "not an actual cert", - Active: true, - // the decoding process doesn't leave this nil - IntermediateCerts: []string{}, - }, - }, - }, - ManualCARoots: []string{"blarg"}, - VerifyServerHostname: true, - }).Return(nil).Once() + // prepopulation is going to grab the token to populate the correct cache key + mcfg.tokens.On("AgentToken").Return("secret").Times(0) - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - if err := setPrimaryDatacenterFromSource(rtConfig, source); err != nil { - return nil, nil, err - } - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: []string{"127.0.0.1:8300"}, - } - rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - CertMonitor: certMon, - } - ac, err := New(conf) + ac, err := New(mcfg.Config) require.NoError(t, err) require.NotNil(t, ac) @@ -343,64 +317,51 @@ func TestInitialConfiguration_restored(t *testing.T) { require.NoError(t, err, data) require.NotNil(t, cfg) require.Equal(t, "primary", cfg.PrimaryDatacenter) - - // ensure no RPC was made - directRPC.AssertExpectations(t) - certMon.AssertExpectations(t) -} - -func setPrimaryDatacenterFromSource(rtConfig *config.RuntimeConfig, source config.Source) error { - if source != nil { - cfg, _, err := source.Parse() - if err != 
nil { - return err - } - rtConfig.PrimaryDatacenter = *cfg.PrimaryDatacenter - } - return nil } func TestInitialConfiguration_success(t *testing.T) { - rtConfig := setupRuntimeConfig(t) + mcfg := newMockedConfig(t) + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + auto_config = { + enabled = true + intro_token ="blarg" + server_addresses = ["127.0.0.1:8300"] + } + verify_outgoing = true + `) + mcfg.Config.Loader = loader.Load - directRPC := new(mockDirectRPC) - directRPC.Test(t) + indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") - populateResponse := func(val interface{}) { - resp, ok := val.(*pbautoconf.AutoConfigResponse) + // this gets called when InitialConfiguration is invoked to record the token from the + // auto-config response + mcfg.tokens.On("UpdateAgentToken", "secret", token.TokenSourceConfig).Return(true).Once() + + // prepopulation is going to grab the token to populate the correct cache key + mcfg.tokens.On("AgentToken").Return("secret").Times(0) + + // no server provider + mcfg.serverProvider.On("FindLANServer").Return(nil).Times(0) + + populateResponse := func(args mock.Arguments) { + resp, ok := args.Get(5).(*pbautoconf.AutoConfigResponse) require.True(t, ok) resp.Config = &pbconfig.Config{ PrimaryDatacenter: "primary", TLS: &pbconfig.TLS{ VerifyServerHostname: true, }, - } - - resp.CARoots = &pbconnect.CARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*pbconnect.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: &types.Timestamp{Seconds: 5000, Nanos: 100}, - NotAfter: &types.Timestamp{Seconds: 10000, Nanos: 9009}, - RootCert: "not an actual cert", - Active: true, + ACL: &pbconfig.ACL{ + Tokens: &pbconfig.ACLTokens{ + Agent: "secret", }, }, } - resp.Certificate = &pbconnect.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: 
&types.Timestamp{Seconds: 6000}, - ValidBefore: &types.Timestamp{Seconds: 7000}, - } - resp.ExtraCACertificates = []string{"blarg"} + + resp.CARoots = mustTranslateCARootsToProtobuf(t, indexedRoots) + resp.Certificate = mustTranslateIssuedCertToProtobuf(t, cert) + resp.ExtraCACertificates = extraCerts } expectedRequest := pbautoconf.AutoConfigRequest{ @@ -409,66 +370,16 @@ func TestInitialConfiguration_success(t *testing.T) { JWT: "blarg", } - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}, "AutoConfig.InitialConfiguration", &expectedRequest, - &pbautoconf.AutoConfigResponse{}).Return(populateResponse) + &pbautoconf.AutoConfigResponse{}).Return(nil).Run(populateResponse) - // setup the mock certificate monitor to ensure that the initial state gets - // updated appropriately during config restoration. - certMon := new(mockCertMonitor) - certMon.Test(t) - certMon.On("Update", &structs.SignedResponse{ - IssuedCert: structs.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - PrivateKeyPEM: "", // the mock - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: time.Unix(6000, 0), - ValidBefore: time.Unix(7000, 0), - }, - ConnectCARoots: structs.IndexedCARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*structs.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: time.Unix(5000, 100), - NotAfter: time.Unix(10000, 9009), - RootCert: "not an actual cert", - Active: true, - }, - }, - }, - ManualCARoots: []string{"blarg"}, - VerifyServerHostname: true, - }).Return(nil).Once() - - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - if err := setPrimaryDatacenterFromSource(rtConfig, source); err != nil { - return nil, nil, err - } - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: 
[]string{"127.0.0.1:8300"}, - } - rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - CertMonitor: certMon, - } - ac, err := New(conf) + ac, err := New(mcfg.Config) require.NoError(t, err) require.NotNil(t, ac) @@ -478,26 +389,61 @@ func TestInitialConfiguration_success(t *testing.T) { require.Equal(t, "primary", cfg.PrimaryDatacenter) // the file was written to. - persistedFile := filepath.Join(rtConfig.DataDir, autoConfigFileName) + persistedFile := filepath.Join(*loader.opts.Config.DataDir, autoConfigFileName) require.FileExists(t, persistedFile) - - // ensure no RPC was made - directRPC.AssertExpectations(t) - certMon.AssertExpectations(t) } func TestInitialConfiguration_retries(t *testing.T) { - rtConfig := setupRuntimeConfig(t) + mcfg := newMockedConfig(t) + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + auto_config = { + enabled = true + intro_token ="blarg" + server_addresses = [ + "198.18.0.1:8300", + "198.18.0.2:8398", + "198.18.0.3:8399", + "127.0.0.1:1234" + ] + } + verify_outgoing = true + `) + mcfg.Config.Loader = loader.Load - directRPC := new(mockDirectRPC) - directRPC.Test(t) + // reduce the retry wait times to make this test run faster + mcfg.Config.Waiter = lib.NewRetryWaiter(2, 0, 1*time.Millisecond, nil) - populateResponse := func(val interface{}) { - resp, ok := val.(*pbautoconf.AutoConfigResponse) + indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", "secret") + + // this gets called when InitialConfiguration is invoked to record the token from the + // auto-config response + mcfg.tokens.On("UpdateAgentToken", "secret", token.TokenSourceConfig).Return(true).Once() + + // prepopulation is going to grab the token to populate the correct cache key + mcfg.tokens.On("AgentToken").Return("secret").Times(0) + + // no server provider + mcfg.serverProvider.On("FindLANServer").Return(nil).Times(0) + + populateResponse := func(args mock.Arguments) { + resp, ok := 
args.Get(5).(*pbautoconf.AutoConfigResponse) require.True(t, ok) resp.Config = &pbconfig.Config{ PrimaryDatacenter: "primary", + TLS: &pbconfig.TLS{ + VerifyServerHostname: true, + }, + ACL: &pbconfig.ACL{ + Tokens: &pbconfig.ACLTokens{ + Agent: "secret", + }, + }, } + + resp.CARoots = mustTranslateCARootsToProtobuf(t, indexedRoots) + resp.Certificate = mustTranslateIssuedCertToProtobuf(t, cert) + resp.ExtraCACertificates = extraCerts } expectedRequest := pbautoconf.AutoConfigRequest{ @@ -509,7 +455,7 @@ func TestInitialConfiguration_retries(t *testing.T) { // basically the 198.18.0.* addresses should fail indefinitely. the first time through the // outer loop we inject a failure for the DNS resolution of localhost to 127.0.0.1. Then // the second time through the outer loop we allow the localhost one to work. - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", @@ -517,7 +463,7 @@ func TestInitialConfiguration_retries(t *testing.T) { "AutoConfig.InitialConfiguration", &expectedRequest, &pbautoconf.AutoConfigResponse{}).Return(fmt.Errorf("injected failure")).Times(0) - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", @@ -525,7 +471,7 @@ func TestInitialConfiguration_retries(t *testing.T) { "AutoConfig.InitialConfiguration", &expectedRequest, &pbautoconf.AutoConfigResponse{}).Return(fmt.Errorf("injected failure")).Times(0) - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", @@ -533,7 +479,7 @@ func TestInitialConfiguration_retries(t *testing.T) { "AutoConfig.InitialConfiguration", &expectedRequest, &pbautoconf.AutoConfigResponse{}).Return(fmt.Errorf("injected failure")).Times(0) - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", @@ -541,37 +487,16 @@ func TestInitialConfiguration_retries(t *testing.T) { "AutoConfig.InitialConfiguration", &expectedRequest, &pbautoconf.AutoConfigResponse{}).Return(fmt.Errorf("injected failure")).Once() - directRPC.On( + mcfg.directRPC.On( "RPC", "dc1", "autoconf", &net.TCPAddr{IP: 
net.IPv4(127, 0, 0, 1), Port: 1234}, "AutoConfig.InitialConfiguration", &expectedRequest, - &pbautoconf.AutoConfigResponse{}).Return(populateResponse) + &pbautoconf.AutoConfigResponse{}).Return(nil).Run(populateResponse).Once() - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - if err := setPrimaryDatacenterFromSource(rtConfig, source); err != nil { - return nil, nil, err - } - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: []string{ - "198.18.0.1:8300", - "198.18.0.2:8398", - "198.18.0.3:8399", - "127.0.0.1:1234", - }, - } - rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - Waiter: lib.NewRetryWaiter(2, 0, 1*time.Millisecond, nil), - } - ac, err := New(conf) + ac, err := New(mcfg.Config) require.NoError(t, err) require.NotNil(t, ac) @@ -581,102 +506,548 @@ func TestInitialConfiguration_retries(t *testing.T) { require.Equal(t, "primary", cfg.PrimaryDatacenter) // the file was written to. - persistedFile := filepath.Join(rtConfig.DataDir, autoConfigFileName) + persistedFile := filepath.Join(*loader.opts.Config.DataDir, autoConfigFileName) require.FileExists(t, persistedFile) - - // ensure no RPC was made - directRPC.AssertExpectations(t) } -func TestAutoConfig_StartStop(t *testing.T) { - // currently the only thing running for autoconf is just the cert monitor - // so this test only needs to ensure that the cert monitor is started and - // stopped and not that anything with regards to running the cert monitor - // actually work. Those are tested in the cert-monitor package. 
+func TestGoRoutineManagement(t *testing.T) { + mcfg := newMockedConfig(t) + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + auto_config = { + enabled = true + intro_token ="blarg" + server_addresses = ["127.0.0.1:8300"] + } + verify_outgoing = true + `) + mcfg.Config.Loader = loader.Load - rtConfig := setupRuntimeConfig(t) + // prepopulation is going to grab the token to populate the correct cache key + mcfg.tokens.On("AgentToken").Return("secret").Times(0) - directRPC := &mockDirectRPC{} - directRPC.Test(t) - certMon := &mockCertMonitor{} - certMon.Test(t) + ac, err := New(mcfg.Config) + require.NoError(t, err) - certMon.On("Start").Return((<-chan struct{})(nil), nil).Once() - certMon.On("Stop").Return(true).Once() + // priming the config so some other requests will work properly that need to + // read from the configuration. We are going to avoid doing InitialConfiguration + // for this test as we only are really concerned with the go routine management + _, err = ac.ReadConfig() + require.NoError(t, err) - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: []string{ - "198.18.0.1", - "198.18.0.2:8398", - "198.18.0.3:8399", - "127.0.0.1:1234", - }, - } - rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - CertMonitor: certMon, + var rootsCtx context.Context + var leafCtx context.Context + var ctxLock sync.Mutex + + rootsReq := ac.caRootsRequest() + mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCARootName, + &rootsReq, + rootsWatchID, + mock.Anything, + ).Return(nil).Times(2).Run(func(args mock.Arguments) { + ctxLock.Lock() + rootsCtx = args.Get(0).(context.Context) + ctxLock.Unlock() + }) + + leafReq := ac.leafCertRequest() + mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCALeafName, + &leafReq, + leafWatchID, + mock.Anything, + 
).Return(nil).Times(2).Run(func(args mock.Arguments) { + ctxLock.Lock() + leafCtx = args.Get(0).(context.Context) + ctxLock.Unlock() + }) + + // we will start/stop things twice + mcfg.tokens.On("Notify", token.TokenKindAgent).Return(token.Notifier{}).Times(2) + mcfg.tokens.On("StopNotify", token.Notifier{}).Times(2) + + mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(time.Now().Add(10 * time.Minute)).Times(0) + + // ensure that auto-config isn't running + require.False(t, ac.IsRunning()) + + // ensure that nothing bad happens and that it reports as stopped + require.False(t, ac.Stop()) + + // ensure that the Done chan also reports that things are not running + // in other words the chan is immediately selectable + requireChanReady(t, ac.Done()) + + // start auto-config + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, ac.Start(ctx)) + + waitForContexts := func() bool { + ctxLock.Lock() + defer ctxLock.Unlock() + return !(rootsCtx == nil || leafCtx == nil) } - ac, err := New(conf) - require.NoError(t, err) - require.NotNil(t, ac) - cfg, err := ac.ReadConfig() - require.NoError(t, err) - ac.config = cfg - require.NoError(t, ac.Start(context.Background())) + // wait for the cache notifications to get started + require.Eventually(t, waitForContexts, 100*time.Millisecond, 10*time.Millisecond) + + // hold onto the Done chan to test for the go routine exiting + done := ac.Done() + + // ensure we report as running + require.True(t, ac.IsRunning()) + + // ensure the done chan is not selectable yet + requireChanNotReady(t, done) + + // ensure we error if we attempt to start again + err = ac.Start(ctx) + testutil.RequireErrorContains(t, err, "AutoConfig is already running") + + // now stop things - it should return true indicating that it was running + // when we attempted to stop it. 
require.True(t, ac.Stop()) - certMon.AssertExpectations(t) - directRPC.AssertExpectations(t) + // ensure that the go routine shuts down - it will close the done chan. Also it should cancel + // the cache watches by cancelling the context it passed into the Notify call. + require.True(t, waitForChans(100*time.Millisecond, done, leafCtx.Done(), rootsCtx.Done()), "AutoConfig didn't shut down") + require.False(t, ac.IsRunning()) + + // restart it + require.NoError(t, ac.Start(ctx)) + + // get the new Done chan + done = ac.Done() + + // ensure that context cancellation causes us to stop as well + cancel() + require.True(t, waitForChans(100*time.Millisecond, done)) } -func TestFallBackTLS(t *testing.T) { - rtConfig := setupRuntimeConfig(t) +type testAutoConfig struct { + mcfg *mockedConfig + ac *AutoConfig + tokenUpdates chan struct{} + originalToken string - directRPC := new(mockDirectRPC) - directRPC.Test(t) + initialRoots *structs.IndexedCARoots + initialCert *structs.IssuedCert + extraCerts []string +} - populateResponse := func(val interface{}) { - resp, ok := val.(*pbautoconf.AutoConfigResponse) +func startedAutoConfig(t *testing.T, autoEncrypt bool) testAutoConfig { + t.Helper() + mcfg := newMockedConfig(t) + loader := setupRuntimeConfig(t) + if !autoEncrypt { + loader.addConfigHCL(` + auto_config = { + enabled = true + intro_token ="blarg" + server_addresses = ["127.0.0.1:8300"] + } + verify_outgoing = true + `) + } else { + loader.addConfigHCL(` + auto_encrypt { + tls = true + } + verify_outgoing = true + `) + } + mcfg.Config.Loader = loader.Load + mcfg.Config.FallbackLeeway = time.Nanosecond + + originalToken := "a5deaa25-11ca-48bf-a979-4c3a7aa4b9a9" + + if !autoEncrypt { + // this gets called when InitialConfiguration is invoked to record the token from the + // auto-config response + mcfg.tokens.On("UpdateAgentToken", originalToken, token.TokenSourceConfig).Return(true).Once() + } + + // we expect this to be retrieved twice: first during cache prepopulation + 
// and then again when setting up the cache watch for the leaf cert. + // However one of those expectations is setup in the expectInitialTLS + // method so we only need one more here + mcfg.tokens.On("AgentToken").Return(originalToken).Once() + + if autoEncrypt { + // when using AutoEncrypt we also have to grab the token once more + // when setting up the initial RPC as the ACL token is what is used + // to authorize the request. + mcfg.tokens.On("AgentToken").Return(originalToken).Once() + } + + // this is called once during Start to initialze the token watches + tokenUpdateCh := make(chan struct{}) + tokenNotifier := token.Notifier{ + Ch: tokenUpdateCh, + } + mcfg.tokens.On("Notify", token.TokenKindAgent).Once().Return(tokenNotifier) + mcfg.tokens.On("StopNotify", tokenNotifier).Once() + + // expect the roots watch on the cache + mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCARootName, + &structs.DCSpecificRequest{Datacenter: "dc1"}, + rootsWatchID, + mock.Anything, + ).Return(nil).Once() + + mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCALeafName, + &cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: originalToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + }, + leafWatchID, + mock.Anything, + ).Return(nil).Once() + + // override the server provider - most of the other tests set it up so that this + // always returns no server (simulating a state where we haven't joined gossip). 
+ // this seems like a good place to ensure this other way of finding server information + // works + mcfg.serverProvider.On("FindLANServer").Once().Return(&metadata.Server{ + Addr: &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 8300}, + }) + + indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, "autoconf", "dc1", originalToken) + + mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(cert.ValidBefore).Once() + + populateResponse := func(args mock.Arguments) { + method := args.String(3) + + switch method { + case "AutoConfig.InitialConfiguration": + resp, ok := args.Get(5).(*pbautoconf.AutoConfigResponse) + require.True(t, ok) + resp.Config = &pbconfig.Config{ + PrimaryDatacenter: "primary", + TLS: &pbconfig.TLS{ + VerifyServerHostname: true, + }, + ACL: &pbconfig.ACL{ + Tokens: &pbconfig.ACLTokens{ + Agent: originalToken, + }, + }, + } + + resp.CARoots = mustTranslateCARootsToProtobuf(t, indexedRoots) + resp.Certificate = mustTranslateIssuedCertToProtobuf(t, cert) + resp.ExtraCACertificates = extraCerts + case "AutoEncrypt.Sign": + resp, ok := args.Get(5).(*structs.SignedResponse) + require.True(t, ok) + *resp = structs.SignedResponse{ + VerifyServerHostname: true, + ConnectCARoots: *indexedRoots, + IssuedCert: *cert, + ManualCARoots: extraCerts, + } + } + } + + if !autoEncrypt { + expectedRequest := pbautoconf.AutoConfigRequest{ + Datacenter: "dc1", + Node: "autoconf", + JWT: "blarg", + } + + mcfg.directRPC.On( + "RPC", + "dc1", + "autoconf", + &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 8300}, + "AutoConfig.InitialConfiguration", + &expectedRequest, + &pbautoconf.AutoConfigResponse{}).Return(nil).Run(populateResponse).Once() + } else { + expectedRequest := structs.CASignRequest{ + WriteRequest: structs.WriteRequest{Token: originalToken}, + Datacenter: "dc1", + // TODO (autoconf) Maybe in the future we should populate a CSR + // and do some manual parsing/verification of the contents. 
The + // bits not having to do with the signing key such as the requested + // SANs and CN. For now though the mockDirectRPC type will empty + // the CSR so we have to pass in an empty string to the expectation. + CSR: "", + } + + mcfg.directRPC.On( + "RPC", + "dc1", + "autoconf", // reusing the same name to prevent needing more configurability + &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 8300}, + "AutoEncrypt.Sign", + &expectedRequest, + &structs.SignedResponse{}).Return(nil).Run(populateResponse) + } + + ac, err := New(mcfg.Config) + require.NoError(t, err) + require.NotNil(t, ac) + + cfg, err := ac.InitialConfiguration(context.Background()) + require.NoError(t, err) + require.NotNil(t, cfg) + if !autoEncrypt { + // auto-encrypt doesn't modify the config but rather sets the value + // in the TLS configurator + require.True(t, cfg.VerifyServerHostname) + } + + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, ac.Start(ctx)) + t.Cleanup(func() { + done := ac.Done() + cancel() + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + select { + case <-done: + // do nothing + case <-timer.C: + t.Fatalf("AutoConfig wasn't stopped within 1 second after test completion") + } + }) + + return testAutoConfig{ + mcfg: mcfg, + ac: ac, + tokenUpdates: tokenUpdateCh, + originalToken: originalToken, + initialRoots: indexedRoots, + initialCert: cert, + extraCerts: extraCerts, + } +} + +// this test ensures that the cache watches are restarted with +// the updated token after receiving a token update +func TestTokenUpdate(t *testing.T) { + testAC := startedAutoConfig(t, false) + + newToken := "1a4cc445-86ed-46b4-a355-bbf5a11dddb0" + + rootsCtx, rootsCancel := context.WithCancel(context.Background()) + testAC.mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCARootName, + &structs.DCSpecificRequest{Datacenter: testAC.ac.config.Datacenter}, + rootsWatchID, + mock.Anything, + ).Return(nil).Once().Run(func(args mock.Arguments) { + 
rootsCancel() + }) + + leafCtx, leafCancel := context.WithCancel(context.Background()) + testAC.mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCALeafName, + &cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: newToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + }, + leafWatchID, + mock.Anything, + ).Return(nil).Once().Run(func(args mock.Arguments) { + leafCancel() + }) + + // this will be retrieved once when resetting the leaf cert watch + testAC.mcfg.tokens.On("AgentToken").Return(newToken).Once() + + // send the notification about the token update + testAC.tokenUpdates <- struct{}{} + + // wait for the leaf cert watches + require.True(t, waitForChans(100*time.Millisecond, leafCtx.Done(), rootsCtx.Done()), "New cache watches were not started within 100ms") +} + +func TestRootsUpdate(t *testing.T) { + testAC := startedAutoConfig(t, false) + + secondCA := connect.TestCA(t, testAC.initialRoots.Roots[0]) + secondRoots := structs.IndexedCARoots{ + ActiveRootID: secondCA.ID, + TrustDomain: connect.TestClusterID, + Roots: []*structs.CARoot{ + secondCA, + testAC.initialRoots.Roots[0], + }, + QueryMeta: structs.QueryMeta{ + Index: 99, + }, + } + + updatedCtx, cancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLS", + testAC.extraCerts, + []string{secondCA.RootCert, testAC.initialRoots.Roots[0].RootCert}, + testAC.initialCert.CertPEM, + "redacted", + true, + ).Return(nil).Once().Run(func(args mock.Arguments) { + cancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(time.Now().Add(10 * time.Minute)).Once() + + req := structs.DCSpecificRequest{Datacenter: "dc1"} + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: rootsWatchID, + Result: &secondRoots, + Meta: cache.ResultMeta{ + Index: 
secondRoots.Index, + }, + })) + + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") + + // persisting these to disk happens right after the chan we are waiting for will have fired above + // however there is no deterministic way to know once its been written outside of maybe a filesystem + // event notifier. That seems a little heavy handed just for this and especially to do in any sort + // of cross platform way. + retry.Run(t, func(r *retry.R) { + resp, err := testAC.ac.readPersistedAutoConfig() + require.NoError(r, err) + require.Equal(r, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID()) + }) +} + +func TestCertUpdate(t *testing.T) { + testAC := startedAutoConfig(t, false) + secondCert := newLeaf(t, "autoconf", "dc1", testAC.initialRoots.Roots[0], 99, 10*time.Minute) + + updatedCtx, cancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLS", + testAC.extraCerts, + []string{testAC.initialRoots.Roots[0].RootCert}, + secondCert.CertPEM, + "redacted", + true, + ).Return(nil).Once().Run(func(args mock.Arguments) { + cancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(secondCert.ValidBefore).Once() + + req := cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: testAC.originalToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + } + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: leafWatchID, + Result: secondCert, + Meta: cache.ResultMeta{ + Index: secondCert.ModifyIndex, + }, + })) + + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") + + // persisting these to disk happens after all the things we would wait for in 
assertCertUpdated + // will have fired. There is no deterministic way to know once its been written so we wrap + // this in a retry. + retry.Run(t, func(r *retry.R) { + resp, err := testAC.ac.readPersistedAutoConfig() + require.NoError(r, err) + + // ensure the roots got persisted to disk + require.Equal(r, secondCert.CertPEM, resp.Certificate.GetCertPEM()) + }) +} + +func TestFallback(t *testing.T) { + testAC := startedAutoConfig(t, false) + + // at this point everything is operating normally and we are just + // waiting for events. We are going to send a new cert that is basically + // already expired and then allow the fallback routine to kick in. + secondCert := newLeaf(t, "autoconf", "dc1", testAC.initialRoots.Roots[0], 100, time.Nanosecond) + secondCA := connect.TestCA(t, testAC.initialRoots.Roots[0]) + secondRoots := structs.IndexedCARoots{ + ActiveRootID: secondCA.ID, + TrustDomain: connect.TestClusterID, + Roots: []*structs.CARoot{ + secondCA, + testAC.initialRoots.Roots[0], + }, + QueryMeta: structs.QueryMeta{ + Index: 101, + }, + } + thirdCert := newLeaf(t, "autoconf", "dc1", secondCA, 102, 10*time.Minute) + + // setup the expectation for when the certs got updated initially + updatedCtx, updateCancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLS", + testAC.extraCerts, + []string{testAC.initialRoots.Roots[0].RootCert}, + secondCert.CertPEM, + "redacted", + true, + ).Return(nil).Once().Run(func(args mock.Arguments) { + updateCancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(secondCert.ValidBefore).Once() + testAC.mcfg.tlsCfg.On("AutoEncryptCertExpired").Return(true).Once() + + fallbackCtx, fallbackCancel := context.WithCancel(context.Background()) + + // also testing here that we can change server IPs for ongoing operations + 
testAC.mcfg.serverProvider.On("FindLANServer").Once().Return(&metadata.Server{ + Addr: &net.TCPAddr{IP: net.IPv4(198, 18, 23, 2), Port: 8300}, + }) + + // after sending the notification for the cert update another InitialConfiguration RPC + // will be made to pull down the latest configuration. So we need to set up the response + // for the second RPC + populateResponse := func(args mock.Arguments) { + resp, ok := args.Get(5).(*pbautoconf.AutoConfigResponse) require.True(t, ok) resp.Config = &pbconfig.Config{ PrimaryDatacenter: "primary", TLS: &pbconfig.TLS{ VerifyServerHostname: true, }, - } - - resp.CARoots = &pbconnect.CARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*pbconnect.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: &types.Timestamp{Seconds: 5000, Nanos: 100}, - NotAfter: &types.Timestamp{Seconds: 10000, Nanos: 9009}, - RootCert: "not an actual cert", - Active: true, + ACL: &pbconfig.ACL{ + Tokens: &pbconfig.ACLTokens{ + Agent: testAC.originalToken, }, }, } - resp.Certificate = &pbconnect.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: &types.Timestamp{Seconds: 6000}, - ValidBefore: &types.Timestamp{Seconds: 7000}, - } - resp.ExtraCACertificates = []string{"blarg"} + + resp.CARoots = mustTranslateCARootsToProtobuf(t, &secondRoots) + resp.Certificate = mustTranslateIssuedCertToProtobuf(t, thirdCert) + resp.ExtraCACertificates = testAC.extraCerts + + fallbackCancel() } expectedRequest := pbautoconf.AutoConfigRequest{ @@ -685,76 +1056,118 @@ func TestFallBackTLS(t *testing.T) { JWT: "blarg", } - directRPC.On( + testAC.mcfg.directRPC.On( "RPC", "dc1", "autoconf", - &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}, + &net.TCPAddr{IP: net.IPv4(198, 18, 23, 2), Port: 8300}, "AutoConfig.InitialConfiguration", &expectedRequest, - &pbautoconf.AutoConfigResponse{}).Return(populateResponse) 
+ &pbautoconf.AutoConfigResponse{}).Return(nil).Run(populateResponse).Once() - // setup the mock certificate monitor we don't expect it to be used - // as the FallbackTLS method is mainly used by the certificate monitor - // if for some reason it fails to renew the TLS certificate in time. - certMon := new(mockCertMonitor) + // this gets called when InitialConfiguration is invoked to record the token from the + // auto-config response which is how the Fallback for auto-config works + testAC.mcfg.tokens.On("UpdateAgentToken", testAC.originalToken, token.TokenSourceConfig).Return(true).Once() - conf := Config{ - DirectRPC: directRPC, - Loader: func(source config.Source) (*config.RuntimeConfig, []string, error) { - rtConfig.AutoConfig = config.AutoConfig{ - Enabled: true, - IntroToken: "blarg", - ServerAddresses: []string{"127.0.0.1:8300"}, - } - rtConfig.VerifyOutgoing = true - return rtConfig, nil, nil - }, - CertMonitor: certMon, + testAC.mcfg.expectInitialTLS(t, "autoconf", "dc1", testAC.originalToken, secondCA, &secondRoots, thirdCert, testAC.extraCerts) + + // after the second RPC we now will use the new certs validity period in the next run loop iteration + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(time.Now().Add(10 * time.Minute)).Once() + + // now that all the mocks are set up we can trigger the whole thing by sending the second expired cert + // as a cache update event. 
+ req := cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: testAC.originalToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, } - ac, err := New(conf) - require.NoError(t, err) - require.NotNil(t, ac) - ac.config, err = ac.ReadConfig() - require.NoError(t, err) + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: leafWatchID, + Result: secondCert, + Meta: cache.ResultMeta{ + Index: secondCert.ModifyIndex, + }, + })) - actual, err := ac.FallbackTLS(context.Background()) - require.NoError(t, err) - expected := &structs.SignedResponse{ - ConnectCARoots: structs.IndexedCARoots{ - ActiveRootID: "active", - TrustDomain: "trust", - Roots: []*structs.CARoot{ - { - ID: "active", - Name: "foo", - SerialNumber: 42, - SigningKeyID: "blarg", - NotBefore: time.Unix(5000, 100), - NotAfter: time.Unix(10000, 9009), - RootCert: "not an actual cert", - Active: true, + // wait for the TLS certificates to get updated + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") + + // now wait for the fallback routine to be invoked + require.True(t, waitForChans(100*time.Millisecond, fallbackCtx.Done()), "fallback routines did not get invoked within the alotted time") + + // persisting these to disk happens after the RPC we waited on above will have fired + // There is no deterministic way to know once its been written so we wrap this in a retry. 
+ retry.Run(t, func(r *retry.R) { + resp, err := testAC.ac.readPersistedAutoConfig() + require.NoError(r, err) + + // ensure the roots got persisted to disk + require.Equal(r, thirdCert.CertPEM, resp.Certificate.GetCertPEM()) + require.Equal(r, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID()) + }) +} + +func TestIntroToken(t *testing.T) { + tokenFile := testutil.TempFile(t, "intro-token") + t.Cleanup(func() { os.Remove(tokenFile.Name()) }) + + tokenFileEmpty := testutil.TempFile(t, "intro-token-empty") + t.Cleanup(func() { os.Remove(tokenFileEmpty.Name()) }) + + tokenFromFile := "8ae34d3a-8adf-446a-b236-69874597cb5b" + tokenFromConfig := "3ad9b572-ea42-4e47-9cd0-53a398a98abf" + require.NoError(t, ioutil.WriteFile(tokenFile.Name(), []byte(tokenFromFile), 0600)) + + type testCase struct { + config *config.RuntimeConfig + err string + token string + } + + cases := map[string]testCase{ + "config": { + config: &config.RuntimeConfig{ + AutoConfig: config.AutoConfig{ + IntroToken: tokenFromConfig, + IntroTokenFile: tokenFile.Name(), }, }, + token: tokenFromConfig, }, - IssuedCert: structs.IssuedCert{ - SerialNumber: "1234", - CertPEM: "not a cert", - Agent: "foo", - AgentURI: "spiffe://blarg/agent/client/dc/foo/id/foo", - ValidAfter: time.Unix(6000, 0), - ValidBefore: time.Unix(7000, 0), + "file": { + config: &config.RuntimeConfig{ + AutoConfig: config.AutoConfig{ + IntroTokenFile: tokenFile.Name(), + }, + }, + token: tokenFromFile, + }, + "file-empty": { + config: &config.RuntimeConfig{ + AutoConfig: config.AutoConfig{ + IntroTokenFile: tokenFileEmpty.Name(), + }, + }, + err: "intro_token_file did not contain any token", }, - ManualCARoots: []string{"blarg"}, - VerifyServerHostname: true, } - // have to just verify that the private key was put in here but we then - // must zero it out so that the remaining equality check will pass - require.NotEmpty(t, actual.IssuedCert.PrivateKeyPEM) - actual.IssuedCert.PrivateKeyPEM = "" - require.Equal(t, expected, actual) - 
// ensure no RPC was made - directRPC.AssertExpectations(t) - certMon.AssertExpectations(t) + for name, tcase := range cases { + t.Run(name, func(t *testing.T) { + ac := AutoConfig{ + config: tcase.config, + } + + token, err := ac.introToken() + if tcase.err != "" { + testutil.RequireErrorContains(t, err, tcase.err) + } else { + require.NoError(t, err) + require.Equal(t, tcase.token, token) + } + }) + } + } diff --git a/agent/auto-config/auto_encrypt.go b/agent/auto-config/auto_encrypt.go new file mode 100644 index 000000000..2290bb332 --- /dev/null +++ b/agent/auto-config/auto_encrypt.go @@ -0,0 +1,111 @@ +package autoconf + +import ( + "context" + "fmt" + "net" + "strings" + + "github.com/hashicorp/consul/agent/structs" +) + +func (ac *AutoConfig) autoEncryptInitialCerts(ctx context.Context) (*structs.SignedResponse, error) { + // generate a CSR + csr, key, err := ac.generateCSR() + if err != nil { + return nil, err + } + + // this resets the failures so that we will perform immediate request + wait := ac.acConfig.Waiter.Success() + for { + select { + case <-wait: + if resp, err := ac.autoEncryptInitialCertsOnce(ctx, csr, key); err == nil && resp != nil { + return resp, nil + } else if err != nil { + ac.logger.Error(err.Error()) + } else { + ac.logger.Error("No error returned when fetching certificates from the servers but no response was either") + } + + wait = ac.acConfig.Waiter.Failed() + case <-ctx.Done(): + ac.logger.Info("interrupted during retrieval of auto-encrypt certificates", "err", ctx.Err()) + return nil, ctx.Err() + } + } +} + +func (ac *AutoConfig) autoEncryptInitialCertsOnce(ctx context.Context, csr, key string) (*structs.SignedResponse, error) { + request := structs.CASignRequest{ + WriteRequest: structs.WriteRequest{Token: ac.acConfig.Tokens.AgentToken()}, + Datacenter: ac.config.Datacenter, + CSR: csr, + } + var resp structs.SignedResponse + + servers, err := ac.autoEncryptHosts() + if err != nil { + return nil, err + } + + for _, s := range 
servers { + // try each IP to see if we can successfully make the request + for _, addr := range ac.resolveHost(s) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + ac.logger.Debug("making AutoEncrypt.Sign RPC", "addr", addr.String()) + err = ac.acConfig.DirectRPC.RPC(ac.config.Datacenter, ac.config.NodeName, &addr, "AutoEncrypt.Sign", &request, &resp) + if err != nil { + ac.logger.Error("AutoEncrypt.Sign RPC failed", "addr", addr.String(), "error", err) + continue + } + + resp.IssuedCert.PrivateKeyPEM = key + return &resp, nil + } + } + return nil, fmt.Errorf("No servers successfully responded to the auto-encrypt request") +} + +func (ac *AutoConfig) autoEncryptHosts() ([]string, error) { + // use servers known to gossip if there are any + if ac.acConfig.ServerProvider != nil { + if srv := ac.acConfig.ServerProvider.FindLANServer(); srv != nil { + return []string{srv.Addr.String()}, nil + } + } + + hosts, err := ac.discoverServers(ac.config.RetryJoinLAN) + if err != nil { + return nil, err + } + + var addrs []string + + // The addresses we use for auto-encrypt are the retry join and start join + // addresses. These are for joining serf and therefore we cannot rely on the + // ports for these. This loop strips any port that may have been specified and + // will let subsequent resolveAddr calls add on the default RPC port. + for _, addr := range append(ac.config.StartJoinAddrsLAN, hosts...) 
{ + host, _, err := net.SplitHostPort(addr) + if err != nil { + if strings.Contains(err.Error(), "missing port in address") { + host = addr + } else { + ac.logger.Warn("error splitting host address into IP and port", "address", addr, "error", err) + continue + } + } + addrs = append(addrs, host) + } + + if len(addrs) == 0 { + return nil, fmt.Errorf("no auto-encrypt server addresses available for use") + } + + return addrs, nil +} diff --git a/agent/auto-config/auto_encrypt_test.go b/agent/auto-config/auto_encrypt_test.go new file mode 100644 index 000000000..867db9441 --- /dev/null +++ b/agent/auto-config/auto_encrypt_test.go @@ -0,0 +1,562 @@ +package autoconf + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "net" + "net/url" + "testing" + "time" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestAutoEncrypt_generateCSR(t *testing.T) { + type testCase struct { + conf *config.RuntimeConfig + + // to validate the csr + expectedSubject pkix.Name + expectedSigAlg x509.SignatureAlgorithm + expectedPubAlg x509.PublicKeyAlgorithm + expectedDNSNames []string + expectedIPs []net.IP + expectedURIs []*url.URL + } + + cases := map[string]testCase{ + "ip-sans": { + conf: &config.RuntimeConfig{ + Datacenter: "dc1", + NodeName: "test-node", + AutoEncryptTLS: true, + AutoEncryptIPSAN: []net.IP{net.IPv4(198, 18, 0, 1), net.IPv4(198, 18, 0, 2)}, + }, + expectedSubject: pkix.Name{ + CommonName: connect.AgentCN("test-node", unknownTrustDomain), + Names: []pkix.AttributeTypeAndValue{ + { + // 2,5,4,3 is the CommonName type ASN1 identifier + 
Type: asn1.ObjectIdentifier{2, 5, 4, 3}, + Value: "testnode.agnt.unknown.consul", + }, + }, + }, + expectedSigAlg: x509.ECDSAWithSHA256, + expectedPubAlg: x509.ECDSA, + expectedDNSNames: defaultDNSSANs, + expectedIPs: append(defaultIPSANs, + net.IP{198, 18, 0, 1}, + net.IP{198, 18, 0, 2}, + ), + expectedURIs: []*url.URL{ + { + Scheme: "spiffe", + Host: unknownTrustDomain, + Path: "/agent/client/dc/dc1/id/test-node", + }, + }, + }, + "dns-sans": { + conf: &config.RuntimeConfig{ + Datacenter: "dc1", + NodeName: "test-node", + AutoEncryptTLS: true, + AutoEncryptDNSSAN: []string{"foo.local", "bar.local"}, + }, + expectedSubject: pkix.Name{ + CommonName: connect.AgentCN("test-node", unknownTrustDomain), + Names: []pkix.AttributeTypeAndValue{ + { + // 2,5,4,3 is the CommonName type ASN1 identifier + Type: asn1.ObjectIdentifier{2, 5, 4, 3}, + Value: "testnode.agnt.unknown.consul", + }, + }, + }, + expectedSigAlg: x509.ECDSAWithSHA256, + expectedPubAlg: x509.ECDSA, + expectedDNSNames: append(defaultDNSSANs, "foo.local", "bar.local"), + expectedIPs: defaultIPSANs, + expectedURIs: []*url.URL{ + { + Scheme: "spiffe", + Host: unknownTrustDomain, + Path: "/agent/client/dc/dc1/id/test-node", + }, + }, + }, + } + + for name, tcase := range cases { + t.Run(name, func(t *testing.T) { + ac := AutoConfig{config: tcase.conf} + + csr, _, err := ac.generateCSR() + require.NoError(t, err) + + request, err := connect.ParseCSR(csr) + require.NoError(t, err) + require.NotNil(t, request) + + require.Equal(t, tcase.expectedSubject, request.Subject) + require.Equal(t, tcase.expectedSigAlg, request.SignatureAlgorithm) + require.Equal(t, tcase.expectedPubAlg, request.PublicKeyAlgorithm) + require.Equal(t, tcase.expectedDNSNames, request.DNSNames) + require.Equal(t, tcase.expectedIPs, request.IPAddresses) + require.Equal(t, tcase.expectedURIs, request.URIs) + }) + } +} + +func TestAutoEncrypt_hosts(t *testing.T) { + type testCase struct { + serverProvider ServerProvider + config 
*config.RuntimeConfig + + hosts []string + err string + } + + providerNone := newMockServerProvider(t) + providerNone.On("FindLANServer").Return(nil).Times(0) + + providerWithServer := newMockServerProvider(t) + providerWithServer.On("FindLANServer").Return(&metadata.Server{Addr: &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 1234}}).Times(0) + + cases := map[string]testCase{ + "router-override": { + serverProvider: providerWithServer, + config: &config.RuntimeConfig{ + RetryJoinLAN: []string{"127.0.0.1:9876"}, + StartJoinAddrsLAN: []string{"192.168.1.2:4321"}, + }, + hosts: []string{"198.18.0.1:1234"}, + }, + "various-addresses": { + serverProvider: providerNone, + config: &config.RuntimeConfig{ + RetryJoinLAN: []string{"198.18.0.1", "foo.com", "[2001:db8::1234]:1234", "abc.local:9876"}, + StartJoinAddrsLAN: []string{"192.168.1.1:5432", "start.local", "[::ffff:172.16.5.4]", "main.dev:6789"}, + }, + hosts: []string{ + "192.168.1.1", + "start.local", + "[::ffff:172.16.5.4]", + "main.dev", + "198.18.0.1", + "foo.com", + "2001:db8::1234", + "abc.local", + }, + }, + "split-host-port-error": { + serverProvider: providerNone, + config: &config.RuntimeConfig{ + StartJoinAddrsLAN: []string{"this-is-not:a:ip:and_port"}, + }, + err: "no auto-encrypt server addresses available for use", + }, + } + + for name, tcase := range cases { + t.Run(name, func(t *testing.T) { + ac := AutoConfig{ + config: tcase.config, + logger: testutil.Logger(t), + acConfig: Config{ + ServerProvider: tcase.serverProvider, + }, + } + + hosts, err := ac.autoEncryptHosts() + if tcase.err != "" { + testutil.RequireErrorContains(t, err, tcase.err) + } else { + require.NoError(t, err) + require.Equal(t, tcase.hosts, hosts) + } + }) + } +} + +func TestAutoEncrypt_InitialCerts(t *testing.T) { + token := "1a148388-3dd7-4db4-9eea-520424b4a86a" + datacenter := "foo" + nodeName := "bar" + + mcfg := newMockedConfig(t) + + _, indexedRoots, cert := testCerts(t, nodeName, datacenter) + + // The following are called 
once for each round through the auto-encrypt initial certs outer loop + // (not the per-host direct rpc attempts but the one involving the RetryWaiter) + mcfg.tokens.On("AgentToken").Return(token).Times(2) + mcfg.serverProvider.On("FindLANServer").Return(nil).Times(2) + + request := structs.CASignRequest{ + WriteRequest: structs.WriteRequest{Token: token}, + Datacenter: datacenter, + // this gets removed by the mock code as its non-deterministic what it will be + CSR: "", + } + + // first failure + mcfg.directRPC.On("RPC", + datacenter, + nodeName, + &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 8300}, + "AutoEncrypt.Sign", + &request, + &structs.SignedResponse{}, + ).Once().Return(fmt.Errorf("injected error")) + // second failure + mcfg.directRPC.On("RPC", + datacenter, + nodeName, + &net.TCPAddr{IP: net.IPv4(198, 18, 0, 2), Port: 8300}, + "AutoEncrypt.Sign", + &request, + &structs.SignedResponse{}, + ).Once().Return(fmt.Errorf("injected error")) + // third times is successfuly (second attempt to first server) + mcfg.directRPC.On("RPC", + datacenter, + nodeName, + &net.TCPAddr{IP: net.IPv4(198, 18, 0, 1), Port: 8300}, + "AutoEncrypt.Sign", + &request, + &structs.SignedResponse{}, + ).Once().Return(nil).Run(func(args mock.Arguments) { + resp, ok := args.Get(5).(*structs.SignedResponse) + require.True(t, ok) + resp.ConnectCARoots = *indexedRoots + resp.IssuedCert = *cert + resp.VerifyServerHostname = true + }) + + mcfg.Config.Waiter = lib.NewRetryWaiter(2, 0, 1*time.Millisecond, nil) + + ac := AutoConfig{ + config: &config.RuntimeConfig{ + Datacenter: datacenter, + NodeName: nodeName, + RetryJoinLAN: []string{"198.18.0.1:1234", "198.18.0.2:3456"}, + ServerPort: 8300, + }, + acConfig: mcfg.Config, + logger: testutil.Logger(t), + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + resp, err := ac.autoEncryptInitialCerts(ctx) + require.NoError(t, err) + require.NotNil(t, resp) + require.True(t, resp.VerifyServerHostname) 
+ require.NotEmpty(t, resp.IssuedCert.PrivateKeyPEM) + resp.IssuedCert.PrivateKeyPEM = "" + cert.PrivateKeyPEM = "" + require.Equal(t, cert, &resp.IssuedCert) + require.Equal(t, indexedRoots, &resp.ConnectCARoots) + require.Empty(t, resp.ManualCARoots) +} + +func TestAutoEncrypt_InitialConfiguration(t *testing.T) { + token := "010494ae-ee45-4433-903c-a58c91297714" + nodeName := "auto-encrypt" + datacenter := "dc1" + + mcfg := newMockedConfig(t) + loader := setupRuntimeConfig(t) + loader.addConfigHCL(` + auto_encrypt { + tls = true + } + `) + loader.opts.Config.NodeName = &nodeName + mcfg.Config.Loader = loader.Load + + indexedRoots, cert, extraCerts := mcfg.setupInitialTLS(t, nodeName, datacenter, token) + + // prepopulation is going to grab the token to populate the correct cache key + mcfg.tokens.On("AgentToken").Return(token).Times(0) + + // no server provider + mcfg.serverProvider.On("FindLANServer").Return(&metadata.Server{Addr: &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}}).Times(1) + + populateResponse := func(args mock.Arguments) { + resp, ok := args.Get(5).(*structs.SignedResponse) + require.True(t, ok) + *resp = structs.SignedResponse{ + VerifyServerHostname: true, + ConnectCARoots: *indexedRoots, + IssuedCert: *cert, + ManualCARoots: extraCerts, + } + } + + expectedRequest := structs.CASignRequest{ + WriteRequest: structs.WriteRequest{Token: token}, + Datacenter: datacenter, + // TODO (autoconf) Maybe in the future we should populate a CSR + // and do some manual parsing/verification of the contents. The + // bits not having to do with the signing key such as the requested + // SANs and CN. For now though the mockDirectRPC type will empty + // the CSR so we have to pass in an empty string to the expectation. 
+ CSR: "", + } + + mcfg.directRPC.On( + "RPC", + datacenter, + nodeName, + &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8300}, + "AutoEncrypt.Sign", + &expectedRequest, + &structs.SignedResponse{}).Return(nil).Run(populateResponse) + + ac, err := New(mcfg.Config) + require.NoError(t, err) + require.NotNil(t, ac) + + cfg, err := ac.InitialConfiguration(context.Background()) + require.NoError(t, err) + require.NotNil(t, cfg) + +} + +func TestAutoEncrypt_TokenUpdate(t *testing.T) { + testAC := startedAutoConfig(t, true) + + newToken := "1a4cc445-86ed-46b4-a355-bbf5a11dddb0" + + rootsCtx, rootsCancel := context.WithCancel(context.Background()) + testAC.mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCARootName, + &structs.DCSpecificRequest{Datacenter: testAC.ac.config.Datacenter}, + rootsWatchID, + mock.Anything, + ).Return(nil).Once().Run(func(args mock.Arguments) { + rootsCancel() + }) + + leafCtx, leafCancel := context.WithCancel(context.Background()) + testAC.mcfg.cache.On("Notify", + mock.Anything, + cachetype.ConnectCALeafName, + &cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: newToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + }, + leafWatchID, + mock.Anything, + ).Return(nil).Once().Run(func(args mock.Arguments) { + leafCancel() + }) + + // this will be retrieved once when resetting the leaf cert watch + testAC.mcfg.tokens.On("AgentToken").Return(newToken).Once() + + // send the notification about the token update + testAC.tokenUpdates <- struct{}{} + + // wait for the leaf cert watches + require.True(t, waitForChans(100*time.Millisecond, leafCtx.Done(), rootsCtx.Done()), "New cache watches were not started within 100ms") +} + +func TestAutoEncrypt_RootsUpdate(t *testing.T) { + testAC := startedAutoConfig(t, true) + + secondCA := connect.TestCA(t, testAC.initialRoots.Roots[0]) + secondRoots := structs.IndexedCARoots{ + ActiveRootID: secondCA.ID, + TrustDomain: connect.TestClusterID, + Roots: 
[]*structs.CARoot{ + secondCA, + testAC.initialRoots.Roots[0], + }, + QueryMeta: structs.QueryMeta{ + Index: 99, + }, + } + + updatedCtx, cancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLSCA", + []string{secondCA.RootCert, testAC.initialRoots.Roots[0].RootCert}, + ).Return(nil).Once().Run(func(args mock.Arguments) { + cancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(time.Now().Add(10 * time.Minute)).Once() + + req := structs.DCSpecificRequest{Datacenter: "dc1"} + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: rootsWatchID, + Result: &secondRoots, + Meta: cache.ResultMeta{ + Index: secondRoots.Index, + }, + })) + + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") +} + +func TestAutoEncrypt_CertUpdate(t *testing.T) { + testAC := startedAutoConfig(t, true) + secondCert := newLeaf(t, "autoconf", "dc1", testAC.initialRoots.Roots[0], 99, 10*time.Minute) + + updatedCtx, cancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLSCert", + secondCert.CertPEM, + "redacted", + ).Return(nil).Once().Run(func(args mock.Arguments) { + cancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(secondCert.ValidBefore).Once() + + req := cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: testAC.originalToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + } + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: leafWatchID, + Result: secondCert, + Meta: cache.ResultMeta{ + Index: secondCert.ModifyIndex, + 
}, + })) + + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") +} + +func TestAutoEncrypt_Fallback(t *testing.T) { + testAC := startedAutoConfig(t, true) + + // at this point everything is operating normally and we are just + // waiting for events. We are going to send a new cert that is basically + // already expired and then allow the fallback routine to kick in. + secondCert := newLeaf(t, "autoconf", "dc1", testAC.initialRoots.Roots[0], 100, time.Nanosecond) + secondCA := connect.TestCA(t, testAC.initialRoots.Roots[0]) + secondRoots := structs.IndexedCARoots{ + ActiveRootID: secondCA.ID, + TrustDomain: connect.TestClusterID, + Roots: []*structs.CARoot{ + secondCA, + testAC.initialRoots.Roots[0], + }, + QueryMeta: structs.QueryMeta{ + Index: 101, + }, + } + thirdCert := newLeaf(t, "autoconf", "dc1", secondCA, 102, 10*time.Minute) + + // setup the expectation for when the certs get updated initially + updatedCtx, updateCancel := context.WithCancel(context.Background()) + testAC.mcfg.tlsCfg.On("UpdateAutoTLSCert", + secondCert.CertPEM, + "redacted", + ).Return(nil).Once().Run(func(args mock.Arguments) { + updateCancel() + }) + + // when a cache event comes in we end up recalculating the fallback timer which requires this call + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(secondCert.ValidBefore).Once() + testAC.mcfg.tlsCfg.On("AutoEncryptCertExpired").Return(true).Once() + + fallbackCtx, fallbackCancel := context.WithCancel(context.Background()) + + // also testing here that we can change server IPs for ongoing operations + testAC.mcfg.serverProvider.On("FindLANServer").Once().Return(&metadata.Server{ + Addr: &net.TCPAddr{IP: net.IPv4(198, 18, 23, 2), Port: 8300}, + }) + + // after sending the notification for the cert update another InitialConfiguration RPC + // will be made to pull down the latest configuration. 
So we need to set up the response + // for the second RPC + populateResponse := func(args mock.Arguments) { + resp, ok := args.Get(5).(*structs.SignedResponse) + require.True(t, ok) + *resp = structs.SignedResponse{ + VerifyServerHostname: true, + ConnectCARoots: secondRoots, + IssuedCert: *thirdCert, + ManualCARoots: testAC.extraCerts, + } + + fallbackCancel() + } + + expectedRequest := structs.CASignRequest{ + WriteRequest: structs.WriteRequest{Token: testAC.originalToken}, + Datacenter: "dc1", + // TODO (autoconf) Maybe in the future we should populate a CSR + // and do some manual parsing/verification of the contents. The + // bits not having to do with the signing key such as the requested + // SANs and CN. For now though the mockDirectRPC type will empty + // the CSR so we have to pass in an empty string to the expectation. + CSR: "", + } + + // the fallback routine to perform auto-encrypt again will need to grab this + testAC.mcfg.tokens.On("AgentToken").Return(testAC.originalToken).Once() + + testAC.mcfg.directRPC.On( + "RPC", + "dc1", + "autoconf", + &net.TCPAddr{IP: net.IPv4(198, 18, 23, 2), Port: 8300}, + "AutoEncrypt.Sign", + &expectedRequest, + &structs.SignedResponse{}).Return(nil).Run(populateResponse).Once() + + testAC.mcfg.expectInitialTLS(t, "autoconf", "dc1", testAC.originalToken, secondCA, &secondRoots, thirdCert, testAC.extraCerts) + + // after the second RPC we now will use the new certs validity period in the next run loop iteration + testAC.mcfg.tlsCfg.On("AutoEncryptCertNotAfter").Return(time.Now().Add(10 * time.Minute)).Once() + + // now that all the mocks are set up we can trigger the whole thing by sending the second expired cert + // as a cache update event. 
+ req := cachetype.ConnectCALeafRequest{ + Datacenter: "dc1", + Agent: "autoconf", + Token: testAC.originalToken, + DNSSAN: defaultDNSSANs, + IPSAN: defaultIPSANs, + } + require.True(t, testAC.mcfg.cache.sendNotification(context.Background(), req.CacheInfo().Key, cache.UpdateEvent{ + CorrelationID: leafWatchID, + Result: secondCert, + Meta: cache.ResultMeta{ + Index: secondCert.ModifyIndex, + }, + })) + + // wait for the TLS certificates to get updated + require.True(t, waitForChans(100*time.Millisecond, updatedCtx.Done()), "TLS certificates were not updated within the alotted time") + + // now wait for the fallback routine to be invoked + require.True(t, waitForChans(100*time.Millisecond, fallbackCtx.Done()), "fallback routines did not get invoked within the alotted time") +} diff --git a/agent/auto-config/config.go b/agent/auto-config/config.go index e6d729f4d..c812cae6a 100644 --- a/agent/auto-config/config.go +++ b/agent/auto-config/config.go @@ -3,9 +3,12 @@ package autoconf import ( "context" "net" + "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/config" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/go-hclog" ) @@ -18,12 +21,35 @@ type DirectRPC interface { RPC(dc string, node string, addr net.Addr, method string, args interface{}, reply interface{}) error } -// CertMonitor is the interface that needs to be satisfied for AutoConfig to be able to -// setup monitoring of the Connect TLS certificate after we first get it. 
-type CertMonitor interface { - Update(*structs.SignedResponse) error - Start(context.Context) (<-chan struct{}, error) - Stop() bool +// Cache is an interface to represent the methods of the +// agent/cache.Cache struct that we care about +type Cache interface { + Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error + Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error +} + +// ServerProvider is an interface that can be used to find one server in the local DC known to +// the agent via Gossip +type ServerProvider interface { + FindLANServer() *metadata.Server +} + +// TLSConfigurator is an interface of the methods on the tlsutil.Configurator that we will require at +// runtime. +type TLSConfigurator interface { + UpdateAutoTLS(manualCAPEMs, connectCAPEMs []string, pub, priv string, verifyServerHostname bool) error + UpdateAutoTLSCA([]string) error + UpdateAutoTLSCert(pub, priv string) error + AutoEncryptCertNotAfter() time.Time + AutoEncryptCertExpired() bool +} + +// TokenStore is an interface of the methods we will need to use from the token.Store. +type TokenStore interface { + AgentToken() string + UpdateAgentToken(secret string, source token.TokenSource) bool + Notify(kind token.TokenKind) token.Notifier + StopNotify(notifier token.Notifier) } // Config contains all the tunables for AutoConfig @@ -37,6 +63,10 @@ type Config struct { // configuration. Setting this field is required. DirectRPC DirectRPC + // ServerProvider is the interfaced to be used by AutoConfig to find any + // known servers during fallback operations. + ServerProvider ServerProvider + // Waiter is a RetryWaiter to be used during retrieval of the // initial configuration. When a round of requests fails we will // wait and eventually make another round of requests (1 round @@ -49,14 +79,28 @@ type Config struct { // having the test take minutes/hours to complete. 
Waiter *lib.RetryWaiter - // CertMonitor is the Connect TLS Certificate Monitor to be used for ongoing - // certificate renewals and connect CA roots updates. This field is not - // strictly required but if not provided the TLS certificates retrieved - // through by the AutoConfig.InitialConfiguration RPC will not be used - // or renewed. - CertMonitor CertMonitor - // Loader merges source with the existing FileSources and returns the complete // RuntimeConfig. Loader func(source config.Source) (cfg *config.RuntimeConfig, warnings []string, err error) + + // TLSConfigurator is the shared TLS Configurator. AutoConfig will update the + // auto encrypt/auto config certs as they are renewed. + TLSConfigurator TLSConfigurator + + // Cache is an object implementing our Cache interface. The Cache + // used at runtime must be able to handle Roots and Leaf Cert watches + Cache Cache + + // FallbackLeeway is the amount of time after certificate expiration before + // invoking the fallback routine. If not set this will default to 10s. + FallbackLeeway time.Duration + + // FallbackRetry is the duration between Fallback invocations when the configured + // fallback routine returns an error. If not set this will default to 1m. + FallbackRetry time.Duration + + // Tokens is the shared token store. It is used to retrieve the current + // agent token as well as getting notifications when that token is updated. + // This field is required. + Tokens TokenStore } diff --git a/agent/auto-config/config_translate.go b/agent/auto-config/config_translate.go index b7d04d5f6..ba8f940d1 100644 --- a/agent/auto-config/config_translate.go +++ b/agent/auto-config/config_translate.go @@ -22,9 +22,9 @@ import ( // package cannot import the agent/config package without running into import cycles. 
func translateConfig(c *pbconfig.Config) config.Config { result := config.Config{ - Datacenter: &c.Datacenter, - PrimaryDatacenter: &c.PrimaryDatacenter, - NodeName: &c.NodeName, + Datacenter: stringPtrOrNil(c.Datacenter), + PrimaryDatacenter: stringPtrOrNil(c.PrimaryDatacenter), + NodeName: stringPtrOrNil(c.NodeName), // only output the SegmentName in the configuration if its non-empty // this will avoid a warning later when parsing the persisted configuration SegmentName: stringPtrOrNil(c.SegmentName), @@ -42,13 +42,13 @@ func translateConfig(c *pbconfig.Config) config.Config { if a := c.ACL; a != nil { result.ACL = config.ACL{ Enabled: &a.Enabled, - PolicyTTL: &a.PolicyTTL, - RoleTTL: &a.RoleTTL, - TokenTTL: &a.TokenTTL, - DownPolicy: &a.DownPolicy, - DefaultPolicy: &a.DefaultPolicy, + PolicyTTL: stringPtrOrNil(a.PolicyTTL), + RoleTTL: stringPtrOrNil(a.RoleTTL), + TokenTTL: stringPtrOrNil(a.TokenTTL), + DownPolicy: stringPtrOrNil(a.DownPolicy), + DefaultPolicy: stringPtrOrNil(a.DefaultPolicy), EnableKeyListPolicy: &a.EnableKeyListPolicy, - DisabledTTL: &a.DisabledTTL, + DisabledTTL: stringPtrOrNil(a.DisabledTTL), EnableTokenPersistence: &a.EnableTokenPersistence, } @@ -76,7 +76,7 @@ func translateConfig(c *pbconfig.Config) config.Config { result.RetryJoinLAN = g.RetryJoinLAN if e := c.Gossip.Encryption; e != nil { - result.EncryptKey = &e.Key + result.EncryptKey = stringPtrOrNil(e.Key) result.EncryptVerifyIncoming = &e.VerifyIncoming result.EncryptVerifyOutgoing = &e.VerifyOutgoing } diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index 18a4270a8..fa6d8febf 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -1,10 +1,13 @@ package autoconf import ( + "fmt" "testing" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/structs" pbconfig "github.com/hashicorp/consul/proto/pbconfig" + "github.com/hashicorp/consul/proto/pbconnect" 
"github.com/stretchr/testify/require" ) @@ -16,6 +19,38 @@ func boolPointer(b bool) *bool { return &b } +func translateCARootToProtobuf(in *structs.CARoot) (*pbconnect.CARoot, error) { + var out pbconnect.CARoot + if err := mapstructureTranslateToProtobuf(in, &out); err != nil { + return nil, fmt.Errorf("Failed to re-encode CA Roots: %w", err) + } + return &out, nil +} + +func mustTranslateCARootToProtobuf(t *testing.T, in *structs.CARoot) *pbconnect.CARoot { + out, err := translateCARootToProtobuf(in) + require.NoError(t, err) + return out +} + +func mustTranslateCARootsToStructs(t *testing.T, in *pbconnect.CARoots) *structs.IndexedCARoots { + out, err := translateCARootsToStructs(in) + require.NoError(t, err) + return out +} + +func mustTranslateCARootsToProtobuf(t *testing.T, in *structs.IndexedCARoots) *pbconnect.CARoots { + out, err := translateCARootsToProtobuf(in) + require.NoError(t, err) + return out +} + +func mustTranslateIssuedCertToProtobuf(t *testing.T, in *structs.IssuedCert) *pbconnect.IssuedCert { + out, err := translateIssuedCertToProtobuf(in) + require.NoError(t, err) + return out +} + func TestTranslateConfig(t *testing.T) { original := pbconfig.Config{ Datacenter: "abc", @@ -119,3 +154,9 @@ func TestTranslateConfig(t *testing.T) { translated := translateConfig(&original) require.Equal(t, expected, translated) } + +func TestCArootsTranslation(t *testing.T) { + _, indexedRoots, _ := testCerts(t, "autoconf", "dc1") + protoRoots := mustTranslateCARootsToProtobuf(t, indexedRoots) + require.Equal(t, indexedRoots, mustTranslateCARootsToStructs(t, protoRoots)) +} diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go new file mode 100644 index 000000000..d828e6a84 --- /dev/null +++ b/agent/auto-config/mock_test.go @@ -0,0 +1,337 @@ +package autoconf + +import ( + "context" + "net" + "sync" + "testing" + "time" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + 
"github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" + "github.com/hashicorp/consul/proto/pbautoconf" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/mock" +) + +type mockDirectRPC struct { + mock.Mock +} + +func newMockDirectRPC(t *testing.T) *mockDirectRPC { + m := mockDirectRPC{} + m.Test(t) + return &m +} + +func (m *mockDirectRPC) RPC(dc string, node string, addr net.Addr, method string, args interface{}, reply interface{}) error { + var retValues mock.Arguments + if method == "AutoConfig.InitialConfiguration" { + req := args.(*pbautoconf.AutoConfigRequest) + csr := req.CSR + req.CSR = "" + retValues = m.Called(dc, node, addr, method, args, reply) + req.CSR = csr + } else if method == "AutoEncrypt.Sign" { + req := args.(*structs.CASignRequest) + csr := req.CSR + req.CSR = "" + retValues = m.Called(dc, node, addr, method, args, reply) + req.CSR = csr + } else { + retValues = m.Called(dc, node, addr, method, args, reply) + } + + return retValues.Error(0) +} + +type mockTLSConfigurator struct { + mock.Mock +} + +func newMockTLSConfigurator(t *testing.T) *mockTLSConfigurator { + m := mockTLSConfigurator{} + m.Test(t) + return &m +} + +func (m *mockTLSConfigurator) UpdateAutoTLS(manualCAPEMs, connectCAPEMs []string, pub, priv string, verifyServerHostname bool) error { + if priv != "" { + priv = "redacted" + } + + ret := m.Called(manualCAPEMs, connectCAPEMs, pub, priv, verifyServerHostname) + return ret.Error(0) +} + +func (m *mockTLSConfigurator) UpdateAutoTLSCA(pems []string) error { + ret := m.Called(pems) + return ret.Error(0) +} +func (m *mockTLSConfigurator) UpdateAutoTLSCert(pub, priv string) error { + if priv != "" { + priv = "redacted" + } + ret := m.Called(pub, priv) + return ret.Error(0) +} +func (m *mockTLSConfigurator) AutoEncryptCertNotAfter() time.Time { + ret := m.Called() + ts, _ := 
ret.Get(0).(time.Time) + + return ts +} +func (m *mockTLSConfigurator) AutoEncryptCertExpired() bool { + ret := m.Called() + return ret.Bool(0) +} + +type mockServerProvider struct { + mock.Mock +} + +func newMockServerProvider(t *testing.T) *mockServerProvider { + m := mockServerProvider{} + m.Test(t) + return &m +} + +func (m *mockServerProvider) FindLANServer() *metadata.Server { + ret := m.Called() + srv, _ := ret.Get(0).(*metadata.Server) + return srv +} + +type mockWatcher struct { + ch chan<- cache.UpdateEvent + done <-chan struct{} +} + +type mockCache struct { + mock.Mock + + lock sync.Mutex + watchers map[string][]mockWatcher +} + +func newMockCache(t *testing.T) *mockCache { + m := mockCache{ + watchers: make(map[string][]mockWatcher), + } + m.Test(t) + return &m +} + +func (m *mockCache) Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error { + ret := m.Called(ctx, t, r, correlationID, ch) + + err := ret.Error(0) + if err == nil { + m.lock.Lock() + key := r.CacheInfo().Key + m.watchers[key] = append(m.watchers[key], mockWatcher{ch: ch, done: ctx.Done()}) + m.lock.Unlock() + } + return err +} + +func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error { + var restore string + cert, ok := result.Value.(*structs.IssuedCert) + if ok { + // we cannot know what the private key is prior to it being injected into the cache. 
+ // therefore redact it here and all mock expectations should take that into account + restore = cert.PrivateKeyPEM + cert.PrivateKeyPEM = "redacted" + } + + ret := m.Called(t, result, dc, token, key) + + if ok && restore != "" { + cert.PrivateKeyPEM = restore + } + return ret.Error(0) +} + +func (m *mockCache) sendNotification(ctx context.Context, key string, u cache.UpdateEvent) bool { + m.lock.Lock() + defer m.lock.Unlock() + + watchers, ok := m.watchers[key] + if !ok || len(m.watchers) < 1 { + return false + } + + var newWatchers []mockWatcher + + for _, watcher := range watchers { + select { + case watcher.ch <- u: + newWatchers = append(newWatchers, watcher) + case <-watcher.done: + // do nothing, this watcher will be removed from the list + case <-ctx.Done(): + // return doesn't matter here really, the test is being cancelled + return true + } + } + + // this removes any already cancelled watches from being sent to + m.watchers[key] = newWatchers + + return true +} + +type mockTokenStore struct { + mock.Mock +} + +func newMockTokenStore(t *testing.T) *mockTokenStore { + m := mockTokenStore{} + m.Test(t) + return &m +} + +func (m *mockTokenStore) AgentToken() string { + ret := m.Called() + return ret.String(0) +} + +func (m *mockTokenStore) UpdateAgentToken(secret string, source token.TokenSource) bool { + return m.Called(secret, source).Bool(0) +} + +func (m *mockTokenStore) Notify(kind token.TokenKind) token.Notifier { + ret := m.Called(kind) + n, _ := ret.Get(0).(token.Notifier) + return n +} + +func (m *mockTokenStore) StopNotify(notifier token.Notifier) { + m.Called(notifier) +} + +type mockedConfig struct { + Config + + directRPC *mockDirectRPC + serverProvider *mockServerProvider + cache *mockCache + tokens *mockTokenStore + tlsCfg *mockTLSConfigurator +} + +func newMockedConfig(t *testing.T) *mockedConfig { + directRPC := newMockDirectRPC(t) + serverProvider := newMockServerProvider(t) + mcache := newMockCache(t) + tokens := newMockTokenStore(t) + 
tlsCfg := newMockTLSConfigurator(t) + + // I am not sure it is well defined behavior but in testing it + // out it does appear like Cleanup functions can fail tests + // Adding in the mock expectations assertions here saves us + // a bunch of code in the other test functions. + t.Cleanup(func() { + if !t.Failed() { + directRPC.AssertExpectations(t) + serverProvider.AssertExpectations(t) + mcache.AssertExpectations(t) + tokens.AssertExpectations(t) + tlsCfg.AssertExpectations(t) + } + }) + + return &mockedConfig{ + Config: Config{ + DirectRPC: directRPC, + ServerProvider: serverProvider, + Cache: mcache, + Tokens: tokens, + TLSConfigurator: tlsCfg, + Logger: testutil.Logger(t), + }, + directRPC: directRPC, + serverProvider: serverProvider, + cache: mcache, + tokens: tokens, + tlsCfg: tlsCfg, + } +} + +func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, token string, ca *structs.CARoot, indexedRoots *structs.IndexedCARoots, cert *structs.IssuedCert, extraCerts []string) { + var pems []string + for _, root := range indexedRoots.Roots { + pems = append(pems, root.RootCert) + } + + // we should update the TLS configurator with the proper certs + m.tlsCfg.On("UpdateAutoTLS", + extraCerts, + pems, + cert.CertPEM, + // auto-config handles the CSR and Key so our tests don't have + // a way to know that the key is correct or not. 
We do replace + // a non empty PEM with "redacted" so we can ensure that some + // certificate is being sent + "redacted", + true, + ).Return(nil).Once() + + rootRes := cache.FetchResult{Value: indexedRoots, Index: indexedRoots.QueryMeta.Index} + rootsReq := structs.DCSpecificRequest{Datacenter: datacenter} + + // we should prepopulate the cache with the CA roots + m.cache.On("Prepopulate", + cachetype.ConnectCARootName, + rootRes, + datacenter, + "", + rootsReq.CacheInfo().Key, + ).Return(nil).Once() + + leafReq := cachetype.ConnectCALeafRequest{ + Token: token, + Agent: agentName, + Datacenter: datacenter, + } + + // copy the cert and redact the private key for the mock expectation + // the actual private key will not correspond to the cert but thats + // because AutoConfig is generated a key/csr internally and sending that + // on up with the request. + copy := *cert + copy.PrivateKeyPEM = "redacted" + leafRes := cache.FetchResult{ + Value: ©, + Index: copy.RaftIndex.ModifyIndex, + State: cachetype.ConnectCALeafSuccess(ca.SigningKeyID), + } + + // we should prepopulate the cache with the agents cert + m.cache.On("Prepopulate", + cachetype.ConnectCALeafName, + leafRes, + datacenter, + token, + leafReq.Key(), + ).Return(nil).Once() + + // when prepopulating the cert in the cache we grab the token so + // we should expec that here + m.tokens.On("AgentToken").Return(token).Once() +} + +func (m *mockedConfig) setupInitialTLS(t *testing.T, agentName, datacenter, token string) (*structs.IndexedCARoots, *structs.IssuedCert, []string) { + ca, indexedRoots, cert := testCerts(t, agentName, datacenter) + + ca2 := connect.TestCA(t, nil) + extraCerts := []string{ca2.RootCert} + + m.expectInitialTLS(t, agentName, datacenter, token, ca, indexedRoots, cert, extraCerts) + return indexedRoots, cert, extraCerts +} diff --git a/agent/auto-config/persist.go b/agent/auto-config/persist.go new file mode 100644 index 000000000..9f94f445c --- /dev/null +++ b/agent/auto-config/persist.go 
@@ -0,0 +1,86 @@ +package autoconf + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/hashicorp/consul/proto/pbautoconf" +) + +const ( + // autoConfigFileName is the name of the file that the agent auto-config settings are + // stored in within the data directory + autoConfigFileName = "auto-config.json" +) + +var ( + pbMarshaler = &jsonpb.Marshaler{ + OrigName: false, + EnumsAsInts: false, + Indent: " ", + EmitDefaults: true, + } + + pbUnmarshaler = &jsonpb.Unmarshaler{ + AllowUnknownFields: false, + } +) + +func (ac *AutoConfig) readPersistedAutoConfig() (*pbautoconf.AutoConfigResponse, error) { + if ac.config.DataDir == "" { + // no data directory means we don't have anything to potentially load + return nil, nil + } + + path := filepath.Join(ac.config.DataDir, autoConfigFileName) + ac.logger.Debug("attempting to restore any persisted configuration", "path", path) + + content, err := ioutil.ReadFile(path) + if err == nil { + rdr := strings.NewReader(string(content)) + + var resp pbautoconf.AutoConfigResponse + if err := pbUnmarshaler.Unmarshal(rdr, &resp); err != nil { + return nil, fmt.Errorf("failed to decode persisted auto-config data: %w", err) + } + + ac.logger.Info("read persisted configuration", "path", path) + return &resp, nil + } + + if !os.IsNotExist(err) { + return nil, fmt.Errorf("failed to load %s: %w", path, err) + } + + // ignore non-existence errors as that is an indicator that we haven't + // performed the auto configuration before + return nil, nil +} + +func (ac *AutoConfig) persistAutoConfig(resp *pbautoconf.AutoConfigResponse) error { + // now that we know the configuration is generally fine including TLS certs go ahead and persist it to disk. 
+ if ac.config.DataDir == "" { + ac.logger.Debug("not persisting auto-config settings because there is no data directory") + return nil + } + + serialized, err := pbMarshaler.MarshalToString(resp) + if err != nil { + return fmt.Errorf("failed to encode auto-config response as JSON: %w", err) + } + + path := filepath.Join(ac.config.DataDir, autoConfigFileName) + + err = ioutil.WriteFile(path, []byte(serialized), 0660) + if err != nil { + return fmt.Errorf("failed to write auto-config configurations: %w", err) + } + + ac.logger.Debug("auto-config settings were persisted to disk") + + return nil +} diff --git a/agent/auto-config/run.go b/agent/auto-config/run.go new file mode 100644 index 000000000..6155dc6be --- /dev/null +++ b/agent/auto-config/run.go @@ -0,0 +1,192 @@ +package autoconf + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" +) + +// handleCacheEvent is used to handle event notifications from the cache for the roots +// or leaf cert watches. 
+func (ac *AutoConfig) handleCacheEvent(u cache.UpdateEvent) error { + switch u.CorrelationID { + case rootsWatchID: + ac.logger.Debug("roots watch fired - updating CA certificates") + if u.Err != nil { + return fmt.Errorf("root watch returned an error: %w", u.Err) + } + + roots, ok := u.Result.(*structs.IndexedCARoots) + if !ok { + return fmt.Errorf("invalid type for roots watch response: %T", u.Result) + } + + return ac.updateCARoots(roots) + case leafWatchID: + ac.logger.Debug("leaf certificate watch fired - updating TLS certificate") + if u.Err != nil { + return fmt.Errorf("leaf watch returned an error: %w", u.Err) + } + + leaf, ok := u.Result.(*structs.IssuedCert) + if !ok { + return fmt.Errorf("invalid type for agent leaf cert watch response: %T", u.Result) + } + + return ac.updateLeafCert(leaf) + } + + return nil +} + +// handleTokenUpdate is used when a notification about the agent token being updated +// is received and various watches need cancelling/restarting to use the new token. +func (ac *AutoConfig) handleTokenUpdate(ctx context.Context) error { + ac.logger.Debug("Agent token updated - resetting watches") + + // TODO (autoencrypt) Prepopulate the cache with the new token with + // the existing cache entry with the old token. The certificate doesn't + // need to change just because the token has. However there isn't a + // good way to make that happen and this behavior is benign enough + // that I am going to push off implementing it. + + // the agent token has been updated so we must update our leaf cert watch. + // this cancels the current watches before setting up new ones + ac.cancelWatches() + + // recreate the chan for cache updates. This is a precautionary measure to ensure + // that we don't accidentally get notified for the new watches being setup before + // a blocking query in the cache returns and sends data to the old chan. 
In theory + // the code in agent/cache/watch.go should prevent this where we specifically check + // for context cancellation prior to sending the event. However we could cancel + // it after that check and finish setting up the new watches before getting the old + // events. Both the go routine scheduler and the OS thread scheduler would have to + // be acting up for this to happen. Regardless the way to ensure we don't get events + // for the old watches is to simply replace the chan we are expecting them from. + close(ac.cacheUpdates) + ac.cacheUpdates = make(chan cache.UpdateEvent, 10) + + // restart watches - this will be done with the correct token + cancelWatches, err := ac.setupCertificateCacheWatches(ctx) + if err != nil { + return fmt.Errorf("failed to restart watches after agent token update: %w", err) + } + ac.cancelWatches = cancelWatches + return nil +} + +// handleFallback is used when the current TLS certificate has expired and the normal +// updating mechanisms have failed to renew it quickly enough. This function will +// use the configured fallback mechanism to retrieve a new cert and start monitoring +// that one. +func (ac *AutoConfig) handleFallback(ctx context.Context) error { + ac.logger.Warn("agent's client certificate has expired") + // Background because the context is mainly useful when the agent is first starting up. 
+ switch { + case ac.config.AutoConfig.Enabled: + resp, err := ac.getInitialConfiguration(ctx) + if err != nil { + return fmt.Errorf("error while retrieving new agent certificates via auto-config: %w", err) + } + + return ac.recordInitialConfiguration(resp) + case ac.config.AutoEncryptTLS: + reply, err := ac.autoEncryptInitialCerts(ctx) + if err != nil { + return fmt.Errorf("error while retrieving new agent certificate via auto-encrypt: %w", err) + } + return ac.setInitialTLSCertificates(reply) + default: + return fmt.Errorf("logic error: either auto-encrypt or auto-config must be enabled") + } +} + +// run is the private method to be spawn by the Start method for +// executing the main monitoring loop. +func (ac *AutoConfig) run(ctx context.Context, exit chan struct{}) { + // The fallbackTimer is used to notify AFTER the agents + // leaf certificate has expired and where we need + // to fall back to the less secure RPC endpoint just like + // if the agent was starting up new. + // + // Check 10sec (fallback leeway duration) after cert + // expires. The agent cache should be handling the expiration + // and renew it before then. + // + // If there is no cert, AutoEncryptCertNotAfter returns + // a value in the past which immediately triggers the + // renew, but this case shouldn't happen because at + // this point, auto_encrypt was just being setup + // successfully. 
+ calcFallbackInterval := func() time.Duration { + certExpiry := ac.acConfig.TLSConfigurator.AutoEncryptCertNotAfter() + return certExpiry.Add(ac.acConfig.FallbackLeeway).Sub(time.Now()) + } + fallbackTimer := time.NewTimer(calcFallbackInterval()) + + // cleanup for once we are stopped + defer func() { + // cancel the go routines performing the cache watches + ac.cancelWatches() + // ensure we don't leak the timers go routine + fallbackTimer.Stop() + // stop receiving notifications for token updates + ac.acConfig.Tokens.StopNotify(ac.tokenUpdates) + + ac.logger.Debug("auto-config has been stopped") + + ac.Lock() + ac.cancel = nil + ac.running = false + // this should be the final cleanup task as its what notifies + // the rest of the world that this go routine has exited. + close(exit) + ac.Unlock() + }() + + for { + select { + case <-ctx.Done(): + ac.logger.Debug("stopping auto-config") + return + case <-ac.tokenUpdates.Ch: + ac.logger.Debug("handling a token update event") + + if err := ac.handleTokenUpdate(ctx); err != nil { + ac.logger.Error("error in handling token update event", "error", err) + } + case u := <-ac.cacheUpdates: + ac.logger.Debug("handling a cache update event", "correlation_id", u.CorrelationID) + + if err := ac.handleCacheEvent(u); err != nil { + ac.logger.Error("error in handling cache update event", "error", err) + } + + // reset the fallback timer as the certificate may have been updated + fallbackTimer.Stop() + fallbackTimer = time.NewTimer(calcFallbackInterval()) + case <-fallbackTimer.C: + // This is a safety net in case the cert doesn't get renewed + // in time. The agent would be stuck in that case because the watches + // never use the AutoEncrypt.Sign endpoint. 
+ + // check auto encrypt client cert expiration + if ac.acConfig.TLSConfigurator.AutoEncryptCertExpired() { + if err := ac.handleFallback(ctx); err != nil { + ac.logger.Error("error when handling a certificate expiry event", "error", err) + fallbackTimer = time.NewTimer(ac.acConfig.FallbackRetry) + } else { + fallbackTimer = time.NewTimer(calcFallbackInterval()) + } + } else { + // this shouldn't be possible. We calculate the timer duration to be the certificate + // expiration time + some leeway (10s default). So whenever we get here the certificate + // should be expired. Regardless its probably worth resetting the timer. + fallbackTimer = time.NewTimer(calcFallbackInterval()) + } + } + } +} diff --git a/agent/auto-config/server_addr.go b/agent/auto-config/server_addr.go new file mode 100644 index 000000000..98af4ae55 --- /dev/null +++ b/agent/auto-config/server_addr.go @@ -0,0 +1,111 @@ +package autoconf + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/go-discover" + discoverk8s "github.com/hashicorp/go-discover/provider/k8s" + + "github.com/hashicorp/go-hclog" +) + +func (ac *AutoConfig) discoverServers(servers []string) ([]string, error) { + providers := make(map[string]discover.Provider) + for k, v := range discover.Providers { + providers[k] = v + } + providers["k8s"] = &discoverk8s.Provider{} + + disco, err := discover.New( + discover.WithUserAgent(lib.UserAgent()), + discover.WithProviders(providers), + ) + + if err != nil { + return nil, fmt.Errorf("Failed to create go-discover resolver: %w", err) + } + + var addrs []string + for _, addr := range servers { + switch { + case strings.Contains(addr, "provider="): + resolved, err := disco.Addrs(addr, ac.logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})) + if err != nil { + ac.logger.Error("failed to resolve go-discover auto-config servers", "configuration", addr, "err", err) + continue + } + + addrs = append(addrs, 
resolved...) + ac.logger.Debug("discovered auto-config servers", "servers", resolved) + default: + addrs = append(addrs, addr) + } + } + + return addrs, nil +} + +// autoConfigHosts is responsible for taking the list of server addresses +// and resolving any go-discover provider invocations. It will then return +// a list of hosts. These might be hostnames and is expected that DNS resolution +// may be performed after this function runs. Additionally these may contain +// ports so SplitHostPort could also be necessary. +func (ac *AutoConfig) autoConfigHosts() ([]string, error) { + // use servers known to gossip if there are any + if ac.acConfig.ServerProvider != nil { + if srv := ac.acConfig.ServerProvider.FindLANServer(); srv != nil { + return []string{srv.Addr.String()}, nil + } + } + + addrs, err := ac.discoverServers(ac.config.AutoConfig.ServerAddresses) + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + return nil, fmt.Errorf("no auto-config server addresses available for use") + } + + return addrs, nil +} + +// resolveHost will take a single host string and convert it to a list of TCPAddrs +// This will process any port in the input as well as looking up the hostname using +// normal DNS resolution. 
+func (ac *AutoConfig) resolveHost(hostPort string) []net.TCPAddr { + port := ac.config.ServerPort + host, portStr, err := net.SplitHostPort(hostPort) + if err != nil { + if strings.Contains(err.Error(), "missing port in address") { + host = hostPort + } else { + ac.logger.Warn("error splitting host address into IP and port", "address", hostPort, "error", err) + return nil + } + } else { + port, err = strconv.Atoi(portStr) + if err != nil { + ac.logger.Warn("Parsed port is not an integer", "port", portStr, "error", err) + return nil + } + } + + // resolve the host to a list of IPs + ips, err := net.LookupIP(host) + if err != nil { + ac.logger.Warn("IP resolution failed", "host", host, "error", err) + return nil + } + + var addrs []net.TCPAddr + for _, ip := range ips { + addrs = append(addrs, net.TCPAddr{IP: ip, Port: port}) + } + + return addrs +} diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go new file mode 100644 index 000000000..380c9f9f8 --- /dev/null +++ b/agent/auto-config/tls.go @@ -0,0 +1,280 @@ +package autoconf + +import ( + "context" + "fmt" + "net" + + "github.com/hashicorp/consul/agent/cache" + cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbautoconf" +) + +const ( + // ID of the roots watch + rootsWatchID = "roots" + + // ID of the leaf watch + leafWatchID = "leaf" + + unknownTrustDomain = "unknown" +) + +var ( + defaultDNSSANs = []string{"localhost"} + + defaultIPSANs = []net.IP{{127, 0, 0, 1}, net.ParseIP("::1")} +) + +func extractPEMs(roots *structs.IndexedCARoots) []string { + var pems []string + for _, root := range roots.Roots { + pems = append(pems, root.RootCert) + } + return pems +} + +// updateTLSFromResponse will update the TLS certificate and roots in the shared +// TLS configurator. 
+func (ac *AutoConfig) updateTLSFromResponse(resp *pbautoconf.AutoConfigResponse) error { + var pems []string + for _, root := range resp.GetCARoots().GetRoots() { + pems = append(pems, root.RootCert) + } + + err := ac.acConfig.TLSConfigurator.UpdateAutoTLS( + resp.ExtraCACertificates, + pems, + resp.Certificate.GetCertPEM(), + resp.Certificate.GetPrivateKeyPEM(), + resp.Config.GetTLS().GetVerifyServerHostname(), + ) + + if err != nil { + return fmt.Errorf("Failed to update the TLS configurator with new certificates: %w", err) + } + + return nil +} + +func (ac *AutoConfig) setInitialTLSCertificates(certs *structs.SignedResponse) error { + if certs == nil { + return nil + } + + if err := ac.populateCertificateCache(certs); err != nil { + return fmt.Errorf("error populating cache with certificates: %w", err) + } + + connectCAPems := extractPEMs(&certs.ConnectCARoots) + + err := ac.acConfig.TLSConfigurator.UpdateAutoTLS( + certs.ManualCARoots, + connectCAPems, + certs.IssuedCert.CertPEM, + certs.IssuedCert.PrivateKeyPEM, + certs.VerifyServerHostname, + ) + + if err != nil { + return fmt.Errorf("error updating TLS configurator with certificates: %w", err) + } + + return nil +} + +func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) error { + cert, err := connect.ParseCert(certs.IssuedCert.CertPEM) + if err != nil { + return fmt.Errorf("Failed to parse certificate: %w", err) + } + + // prepolutate roots cache + rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index} + rootsReq := ac.caRootsRequest() + // getting the roots doesn't require a token so in order to potentially share the cache with another + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, "", rootsReq.CacheInfo().Key); err != nil { + return err + } + + leafReq := ac.leafCertRequest() + + // prepolutate leaf cache + certRes := cache.FetchResult{ + Value: &certs.IssuedCert, + Index: 
certs.IssuedCert.RaftIndex.ModifyIndex, + State: cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)), + } + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil { + return err + } + + return nil +} + +func (ac *AutoConfig) setupCertificateCacheWatches(ctx context.Context) (context.CancelFunc, error) { + notificationCtx, cancel := context.WithCancel(ctx) + + rootsReq := ac.caRootsRequest() + err := ac.acConfig.Cache.Notify(notificationCtx, cachetype.ConnectCARootName, &rootsReq, rootsWatchID, ac.cacheUpdates) + if err != nil { + cancel() + return nil, err + } + + leafReq := ac.leafCertRequest() + err = ac.acConfig.Cache.Notify(notificationCtx, cachetype.ConnectCALeafName, &leafReq, leafWatchID, ac.cacheUpdates) + if err != nil { + cancel() + return nil, err + } + + return cancel, nil +} + +func (ac *AutoConfig) updateCARoots(roots *structs.IndexedCARoots) error { + switch { + case ac.config.AutoConfig.Enabled: + ac.Lock() + defer ac.Unlock() + var err error + ac.autoConfigResponse.CARoots, err = translateCARootsToProtobuf(roots) + if err != nil { + return err + } + + if err := ac.updateTLSFromResponse(ac.autoConfigResponse); err != nil { + return err + } + return ac.persistAutoConfig(ac.autoConfigResponse) + case ac.config.AutoEncryptTLS: + pems := extractPEMs(roots) + + if err := ac.acConfig.TLSConfigurator.UpdateAutoTLSCA(pems); err != nil { + return fmt.Errorf("failed to update Connect CA certificates: %w", err) + } + return nil + default: + return nil + } +} + +func (ac *AutoConfig) updateLeafCert(cert *structs.IssuedCert) error { + switch { + case ac.config.AutoConfig.Enabled: + ac.Lock() + defer ac.Unlock() + var err error + ac.autoConfigResponse.Certificate, err = translateIssuedCertToProtobuf(cert) + if err != nil { + return err + } + + if err := ac.updateTLSFromResponse(ac.autoConfigResponse); err != nil { + return err + } + return 
ac.persistAutoConfig(ac.autoConfigResponse) + case ac.config.AutoEncryptTLS: + if err := ac.acConfig.TLSConfigurator.UpdateAutoTLSCert(cert.CertPEM, cert.PrivateKeyPEM); err != nil { + return fmt.Errorf("failed to update the agent leaf cert: %w", err) + } + return nil + default: + return nil + } +} + +func (ac *AutoConfig) caRootsRequest() structs.DCSpecificRequest { + return structs.DCSpecificRequest{Datacenter: ac.config.Datacenter} +} + +func (ac *AutoConfig) leafCertRequest() cachetype.ConnectCALeafRequest { + return cachetype.ConnectCALeafRequest{ + Datacenter: ac.config.Datacenter, + Agent: ac.config.NodeName, + DNSSAN: ac.getDNSSANs(), + IPSAN: ac.getIPSANs(), + Token: ac.acConfig.Tokens.AgentToken(), + } +} + +// generateCSR will generate a CSR for an Agent certificate. This should +// be sent along with the AutoConfig.InitialConfiguration RPC or the +// AutoEncrypt.Sign RPC. The generated CSR does NOT have a real trust domain +// as when generating this we do not yet have the CA roots. The server will +// update the trust domain for us though. +func (ac *AutoConfig) generateCSR() (csr string, key string, err error) { + // We don't provide the correct host here, because we don't know any + // better at this point. Apart from the domain, we would need the + // ClusterID, which we don't have. This is why we go with + // unknownTrustDomain the first time. Subsequent CSRs will have the + // correct TrustDomain. 
+ id := &connect.SpiffeIDAgent{ + // will be replaced + Host: unknownTrustDomain, + Datacenter: ac.config.Datacenter, + Agent: ac.config.NodeName, + } + + caConfig, err := ac.config.ConnectCAConfiguration() + if err != nil { + return "", "", fmt.Errorf("Cannot generate CSR: %w", err) + } + + conf, err := caConfig.GetCommonConfig() + if err != nil { + return "", "", fmt.Errorf("Failed to load common CA configuration: %w", err) + } + + if conf.PrivateKeyType == "" { + conf.PrivateKeyType = connect.DefaultPrivateKeyType + } + if conf.PrivateKeyBits == 0 { + conf.PrivateKeyBits = connect.DefaultPrivateKeyBits + } + + // Create a new private key + pk, pkPEM, err := connect.GeneratePrivateKeyWithConfig(conf.PrivateKeyType, conf.PrivateKeyBits) + if err != nil { + return "", "", fmt.Errorf("Failed to generate private key: %w", err) + } + + dnsNames := ac.getDNSSANs() + ipAddresses := ac.getIPSANs() + + // Create a CSR. + // + // The Common Name includes the dummy trust domain for now but Server will + // override this when it is signed anyway so it's OK. + cn := connect.AgentCN(ac.config.NodeName, unknownTrustDomain) + csr, err = connect.CreateCSR(id, cn, pk, dnsNames, ipAddresses) + if err != nil { + return "", "", err + } + + return csr, pkPEM, nil +} + +func (ac *AutoConfig) getDNSSANs() []string { + sans := defaultDNSSANs + switch { + case ac.config.AutoConfig.Enabled: + sans = append(sans, ac.config.AutoConfig.DNSSANs...) + case ac.config.AutoEncryptTLS: + sans = append(sans, ac.config.AutoEncryptDNSSAN...) + } + return sans +} + +func (ac *AutoConfig) getIPSANs() []net.IP { + sans := defaultIPSANs + switch { + case ac.config.AutoConfig.Enabled: + sans = append(sans, ac.config.AutoConfig.IPSANs...) + case ac.config.AutoEncryptTLS: + sans = append(sans, ac.config.AutoEncryptIPSAN...) 
+ } + return sans +} diff --git a/agent/auto-config/tls_test.go b/agent/auto-config/tls_test.go new file mode 100644 index 000000000..400d7be0d --- /dev/null +++ b/agent/auto-config/tls_test.go @@ -0,0 +1,56 @@ +package autoconf + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/require" +) + +func newLeaf(t *testing.T, agentName, datacenter string, ca *structs.CARoot, idx uint64, expiration time.Duration) *structs.IssuedCert { + t.Helper() + + pub, priv, err := connect.TestAgentLeaf(t, agentName, datacenter, ca, expiration) + require.NoError(t, err) + cert, err := connect.ParseCert(pub) + require.NoError(t, err) + + spiffeID, err := connect.ParseCertURI(cert.URIs[0]) + require.NoError(t, err) + + agentID, ok := spiffeID.(*connect.SpiffeIDAgent) + require.True(t, ok, "certificate doesn't have an agent leaf cert URI") + + return &structs.IssuedCert{ + SerialNumber: cert.SerialNumber.String(), + CertPEM: pub, + PrivateKeyPEM: priv, + ValidAfter: cert.NotBefore, + ValidBefore: cert.NotAfter, + Agent: agentID.Agent, + AgentURI: agentID.URI().String(), + EnterpriseMeta: *structs.DefaultEnterpriseMeta(), + RaftIndex: structs.RaftIndex{ + CreateIndex: idx, + ModifyIndex: idx, + }, + } +} + +func testCerts(t *testing.T, agentName, datacenter string) (*structs.CARoot, *structs.IndexedCARoots, *structs.IssuedCert) { + ca := connect.TestCA(t, nil) + ca.IntermediateCerts = make([]string, 0) + cert := newLeaf(t, agentName, datacenter, ca, 1, 10*time.Minute) + indexedRoots := structs.IndexedCARoots{ + ActiveRootID: ca.ID, + TrustDomain: connect.TestClusterID, + Roots: []*structs.CARoot{ + ca, + }, + QueryMeta: structs.QueryMeta{Index: 1}, + } + + return ca, &indexedRoots, cert +} diff --git a/agent/cert-monitor/cert_monitor.go b/agent/cert-monitor/cert_monitor.go deleted file mode 100644 index 0ad50e8a1..000000000 --- a/agent/cert-monitor/cert_monitor.go +++ /dev/null @@ 
-1,505 +0,0 @@ -package certmon - -import ( - "context" - "fmt" - "io/ioutil" - "sync" - "time" - - "github.com/hashicorp/consul/agent/cache" - cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/tlsutil" - "github.com/hashicorp/go-hclog" -) - -const ( - // ID of the roots watch - rootsWatchID = "roots" - - // ID of the leaf watch - leafWatchID = "leaf" -) - -// Cache is an interface to represent the methods of the -// agent/cache.Cache struct that we care about -type Cache interface { - Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error - Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error -} - -// CertMonitor will setup the proper watches to ensure that -// the Agent's Connect TLS certificate remains up to date -type CertMonitor struct { - logger hclog.Logger - cache Cache - tlsConfigurator *tlsutil.Configurator - tokens *token.Store - leafReq cachetype.ConnectCALeafRequest - rootsReq structs.DCSpecificRequest - persist PersistFunc - fallback FallbackFunc - fallbackLeeway time.Duration - fallbackRetry time.Duration - - l sync.Mutex - running bool - // cancel is used to cancel the entire CertMonitor - // go routine. This is the main field protected - // by the mutex as it being non-nil indicates that - // the go routine has been started and is stoppable. - // note that it doesn't indcate that the go routine - // is currently running. - cancel context.CancelFunc - - // cancelWatches is used to cancel the existing - // cache watches. 
This is mainly only necessary - // when the Agent token changes - cancelWatches context.CancelFunc - - // cacheUpdates is the chan used to have the cache - // send us back events - cacheUpdates chan cache.UpdateEvent - // tokenUpdates is the struct used to receive - // events from the token store when the Agent - // token is updated. - tokenUpdates token.Notifier - - // this is used to keep a local copy of the certs - // keys and ca certs. It will be used to persist - // all of the local state at once. - certs structs.SignedResponse -} - -// New creates a new CertMonitor for automatically rotating -// an Agent's Connect Certificate -func New(config *Config) (*CertMonitor, error) { - logger := config.Logger - if logger == nil { - logger = hclog.New(&hclog.LoggerOptions{ - Level: 0, - Output: ioutil.Discard, - }) - } - - if config.FallbackLeeway == 0 { - config.FallbackLeeway = 10 * time.Second - } - if config.FallbackRetry == 0 { - config.FallbackRetry = time.Minute - } - - if config.Cache == nil { - return nil, fmt.Errorf("CertMonitor creation requires a Cache") - } - - if config.TLSConfigurator == nil { - return nil, fmt.Errorf("CertMonitor creation requires a TLS Configurator") - } - - if config.Fallback == nil { - return nil, fmt.Errorf("CertMonitor creation requires specifying a FallbackFunc") - } - - if config.Datacenter == "" { - return nil, fmt.Errorf("CertMonitor creation requires specifying the datacenter") - } - - if config.NodeName == "" { - return nil, fmt.Errorf("CertMonitor creation requires specifying the agent's node name") - } - - if config.Tokens == nil { - return nil, fmt.Errorf("CertMonitor creation requires specifying a token store") - } - - return &CertMonitor{ - logger: logger, - cache: config.Cache, - tokens: config.Tokens, - tlsConfigurator: config.TLSConfigurator, - persist: config.Persist, - fallback: config.Fallback, - fallbackLeeway: config.FallbackLeeway, - fallbackRetry: config.FallbackRetry, - rootsReq: 
structs.DCSpecificRequest{Datacenter: config.Datacenter}, - leafReq: cachetype.ConnectCALeafRequest{ - Datacenter: config.Datacenter, - Agent: config.NodeName, - DNSSAN: config.DNSSANs, - IPSAN: config.IPSANs, - }, - }, nil -} - -// Update is responsible for priming the cache with the certificates -// as well as injecting them into the TLS configurator -func (m *CertMonitor) Update(certs *structs.SignedResponse) error { - if certs == nil { - return nil - } - - m.certs = *certs - - if err := m.populateCache(certs); err != nil { - return fmt.Errorf("error populating cache with certificates: %w", err) - } - - connectCAPems := []string{} - for _, ca := range certs.ConnectCARoots.Roots { - connectCAPems = append(connectCAPems, ca.RootCert) - } - - // Note that its expected that the private key be within the IssuedCert in the - // SignedResponse. This isn't how a server would send back the response and requires - // that the recipient of the response who also has access to the private key will - // have filled it in. The Cache definitely does this but auto-encrypt/auto-config - // will need to ensure the original response is setup this way too. 
- err := m.tlsConfigurator.UpdateAutoTLS( - certs.ManualCARoots, - connectCAPems, - certs.IssuedCert.CertPEM, - certs.IssuedCert.PrivateKeyPEM, - certs.VerifyServerHostname) - - if err != nil { - return fmt.Errorf("error updating TLS configurator with certificates: %w", err) - } - - return nil -} - -// populateCache is responsible for inserting the certificates into the cache -func (m *CertMonitor) populateCache(resp *structs.SignedResponse) error { - cert, err := connect.ParseCert(resp.IssuedCert.CertPEM) - if err != nil { - return fmt.Errorf("Failed to parse certificate: %w", err) - } - - // prepolutate roots cache - rootRes := cache.FetchResult{Value: &resp.ConnectCARoots, Index: resp.ConnectCARoots.QueryMeta.Index} - // getting the roots doesn't require a token so in order to potentially share the cache with another - if err := m.cache.Prepopulate(cachetype.ConnectCARootName, rootRes, m.rootsReq.Datacenter, "", m.rootsReq.CacheInfo().Key); err != nil { - return err - } - - // copy the template and update the token - leafReq := m.leafReq - leafReq.Token = m.tokens.AgentToken() - - // prepolutate leaf cache - certRes := cache.FetchResult{ - Value: &resp.IssuedCert, - Index: resp.ConnectCARoots.QueryMeta.Index, - State: cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)), - } - if err := m.cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil { - return err - } - return nil -} - -// Start spawns the go routine to monitor the certificate and ensure it is -// rotated/renewed as necessary. 
The chan will indicate once the started -// go routine has exited -func (m *CertMonitor) Start(ctx context.Context) (<-chan struct{}, error) { - m.l.Lock() - defer m.l.Unlock() - - if m.running || m.cancel != nil { - return nil, fmt.Errorf("the CertMonitor is already running") - } - - // create the top level context to control the go - // routine executing the `run` method - ctx, cancel := context.WithCancel(ctx) - - // create the channel to get cache update events through - // really we should only ever get 10 updates - m.cacheUpdates = make(chan cache.UpdateEvent, 10) - - // setup the cache watches - cancelWatches, err := m.setupCacheWatches(ctx) - if err != nil { - cancel() - return nil, fmt.Errorf("error setting up cache watches: %w", err) - } - - // start the token update notifier - m.tokenUpdates = m.tokens.Notify(token.TokenKindAgent) - - // store the cancel funcs - m.cancel = cancel - m.cancelWatches = cancelWatches - - m.running = true - exit := make(chan struct{}) - go m.run(ctx, exit) - - m.logger.Info("certificate monitor started") - return exit, nil -} - -// Stop manually stops the go routine spawned by Start and -// returns whether the go routine was still running before -// cancelling. -// -// Note that cancelling the context passed into Start will -// also cause the go routine to stop -func (m *CertMonitor) Stop() bool { - m.l.Lock() - defer m.l.Unlock() - - if !m.running { - return false - } - - if m.cancel != nil { - m.cancel() - } - - return true -} - -// IsRunning returns whether the go routine to perform certificate monitoring -// is already running. -func (m *CertMonitor) IsRunning() bool { - m.l.Lock() - defer m.l.Unlock() - return m.running -} - -// setupCacheWatches will start both the roots and leaf cert watch with a new child -// context and an up to date ACL token. The watches are started with a new child context -// whose CancelFunc is also returned. 
-func (m *CertMonitor) setupCacheWatches(ctx context.Context) (context.CancelFunc, error) { - notificationCtx, cancel := context.WithCancel(ctx) - - // copy the request - rootsReq := m.rootsReq - - err := m.cache.Notify(notificationCtx, cachetype.ConnectCARootName, &rootsReq, rootsWatchID, m.cacheUpdates) - if err != nil { - cancel() - return nil, err - } - - // copy the request - leafReq := m.leafReq - leafReq.Token = m.tokens.AgentToken() - - err = m.cache.Notify(notificationCtx, cachetype.ConnectCALeafName, &leafReq, leafWatchID, m.cacheUpdates) - if err != nil { - cancel() - return nil, err - } - - return cancel, nil -} - -// handleCacheEvent is used to handle event notifications from the cache for the roots -// or leaf cert watches. -func (m *CertMonitor) handleCacheEvent(u cache.UpdateEvent) error { - switch u.CorrelationID { - case rootsWatchID: - m.logger.Debug("roots watch fired - updating CA certificates") - if u.Err != nil { - return fmt.Errorf("root watch returned an error: %w", u.Err) - } - - roots, ok := u.Result.(*structs.IndexedCARoots) - if !ok { - return fmt.Errorf("invalid type for roots watch response: %T", u.Result) - } - - m.certs.ConnectCARoots = *roots - - var pems []string - for _, root := range roots.Roots { - pems = append(pems, root.RootCert) - } - - if err := m.tlsConfigurator.UpdateAutoTLSCA(pems); err != nil { - return fmt.Errorf("failed to update Connect CA certificates: %w", err) - } - - if m.persist != nil { - copy := m.certs - if err := m.persist(©); err != nil { - return fmt.Errorf("failed to persist certificate package: %w", err) - } - } - case leafWatchID: - m.logger.Debug("leaf certificate watch fired - updating TLS certificate") - if u.Err != nil { - return fmt.Errorf("leaf watch returned an error: %w", u.Err) - } - - leaf, ok := u.Result.(*structs.IssuedCert) - if !ok { - return fmt.Errorf("invalid type for agent leaf cert watch response: %T", u.Result) - } - - m.certs.IssuedCert = *leaf - - if err := 
m.tlsConfigurator.UpdateAutoTLSCert(leaf.CertPEM, leaf.PrivateKeyPEM); err != nil { - return fmt.Errorf("failed to update the agent leaf cert: %w", err) - } - - if m.persist != nil { - copy := m.certs - if err := m.persist(©); err != nil { - return fmt.Errorf("failed to persist certificate package: %w", err) - } - } - } - - return nil -} - -// handleTokenUpdate is used when a notification about the agent token being updated -// is received and various watches need cancelling/restarting to use the new token. -func (m *CertMonitor) handleTokenUpdate(ctx context.Context) error { - m.logger.Debug("Agent token updated - resetting watches") - - // TODO (autoencrypt) Prepopulate the cache with the new token with - // the existing cache entry with the old token. The certificate doesn't - // need to change just because the token has. However there isn't a - // good way to make that happen and this behavior is benign enough - // that I am going to push off implementing it. - - // the agent token has been updated so we must update our leaf cert watch. - // this cancels the current watches before setting up new ones - m.cancelWatches() - - // recreate the chan for cache updates. This is a precautionary measure to ensure - // that we don't accidentally get notified for the new watches being setup before - // a blocking query in the cache returns and sends data to the old chan. In theory - // the code in agent/cache/watch.go should prevent this where we specifically check - // for context cancellation prior to sending the event. However we could cancel - // it after that check and finish setting up the new watches before getting the old - // events. Both the go routine scheduler and the OS thread scheduler would have to - // be acting up for this to happen. Regardless the way to ensure we don't get events - // for the old watches is to simply replace the chan we are expecting them from. 
- close(m.cacheUpdates) - m.cacheUpdates = make(chan cache.UpdateEvent, 10) - - // restart watches - this will be done with the correct token - cancelWatches, err := m.setupCacheWatches(ctx) - if err != nil { - return fmt.Errorf("failed to restart watches after agent token update: %w", err) - } - m.cancelWatches = cancelWatches - return nil -} - -// handleFallback is used when the current TLS certificate has expired and the normal -// updating mechanisms have failed to renew it quickly enough. This function will -// use the configured fallback mechanism to retrieve a new cert and start monitoring -// that one. -func (m *CertMonitor) handleFallback(ctx context.Context) error { - m.logger.Warn("agent's client certificate has expired") - // Background because the context is mainly useful when the agent is first starting up. - reply, err := m.fallback(ctx) - if err != nil { - return fmt.Errorf("error when getting new agent certificate: %w", err) - } - - if m.persist != nil { - if err := m.persist(reply); err != nil { - return fmt.Errorf("failed to persist certificate package: %w", err) - } - } - return m.Update(reply) -} - -// run is the private method to be spawn by the Start method for -// executing the main monitoring loop. -func (m *CertMonitor) run(ctx context.Context, exit chan struct{}) { - // The fallbackTimer is used to notify AFTER the agents - // leaf certificate has expired and where we need - // to fall back to the less secure RPC endpoint just like - // if the agent was starting up new. - // - // Check 10sec (fallback leeway duration) after cert - // expires. The agent cache should be handling the expiration - // and renew it before then. - // - // If there is no cert, AutoEncryptCertNotAfter returns - // a value in the past which immediately triggers the - // renew, but this case shouldn't happen because at - // this point, auto_encrypt was just being setup - // successfully. 
- calcFallbackInterval := func() time.Duration { - certExpiry := m.tlsConfigurator.AutoEncryptCertNotAfter() - return certExpiry.Add(m.fallbackLeeway).Sub(time.Now()) - } - fallbackTimer := time.NewTimer(calcFallbackInterval()) - - // cleanup for once we are stopped - defer func() { - // cancel the go routines performing the cache watches - m.cancelWatches() - // ensure we don't leak the timers go routine - fallbackTimer.Stop() - // stop receiving notifications for token updates - m.tokens.StopNotify(m.tokenUpdates) - - m.logger.Debug("certificate monitor has been stopped") - - m.l.Lock() - m.cancel = nil - m.running = false - m.l.Unlock() - - // this should be the final cleanup task as its what notifies - // the rest of the world that this go routine has exited. - close(exit) - }() - - for { - select { - case <-ctx.Done(): - m.logger.Debug("stopping the certificate monitor") - return - case <-m.tokenUpdates.Ch: - m.logger.Debug("handling a token update event") - - if err := m.handleTokenUpdate(ctx); err != nil { - m.logger.Error("error in handling token update event", "error", err) - } - case u := <-m.cacheUpdates: - m.logger.Debug("handling a cache update event", "correlation_id", u.CorrelationID) - - if err := m.handleCacheEvent(u); err != nil { - m.logger.Error("error in handling cache update event", "error", err) - } - - // reset the fallback timer as the certificate may have been updated - fallbackTimer.Stop() - fallbackTimer = time.NewTimer(calcFallbackInterval()) - case <-fallbackTimer.C: - // This is a safety net in case the auto_encrypt cert doesn't get renewed - // in time. The agent would be stuck in that case because the watches - // never use the AutoEncrypt.Sign endpoint. 
- - // check auto encrypt client cert expiration - if m.tlsConfigurator.AutoEncryptCertExpired() { - if err := m.handleFallback(ctx); err != nil { - m.logger.Error("error when handling a certificate expiry event", "error", err) - fallbackTimer = time.NewTimer(m.fallbackRetry) - } else { - fallbackTimer = time.NewTimer(calcFallbackInterval()) - } - } else { - // this shouldn't be possible. We calculate the timer duration to be the certificate - // expiration time + some leeway (10s default). So whenever we get here the certificate - // should be expired. Regardless its probably worth resetting the timer. - fallbackTimer = time.NewTimer(calcFallbackInterval()) - } - } - } -} diff --git a/agent/cert-monitor/cert_monitor_test.go b/agent/cert-monitor/cert_monitor_test.go deleted file mode 100644 index 2b6ea76d8..000000000 --- a/agent/cert-monitor/cert_monitor_test.go +++ /dev/null @@ -1,731 +0,0 @@ -package certmon - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "sync" - "testing" - "time" - - "github.com/hashicorp/consul/agent/cache" - cachetype "github.com/hashicorp/consul/agent/cache-types" - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/consul/sdk/testutil/retry" - "github.com/hashicorp/consul/tlsutil" - "github.com/hashicorp/go-uuid" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -type mockFallback struct { - mock.Mock -} - -func (m *mockFallback) fallback(ctx context.Context) (*structs.SignedResponse, error) { - ret := m.Called() - resp, _ := ret.Get(0).(*structs.SignedResponse) - return resp, ret.Error(1) -} - -type mockPersist struct { - mock.Mock -} - -func (m *mockPersist) persist(resp *structs.SignedResponse) error { - return m.Called(resp).Error(0) -} - -type mockWatcher struct { - ch chan<- cache.UpdateEvent - done <-chan struct{} -} - -type mockCache struct 
{ - mock.Mock - - lock sync.Mutex - watchers map[string][]mockWatcher -} - -func (m *mockCache) Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error { - m.lock.Lock() - key := r.CacheInfo().Key - m.watchers[key] = append(m.watchers[key], mockWatcher{ch: ch, done: ctx.Done()}) - m.lock.Unlock() - ret := m.Called(t, r, correlationID) - return ret.Error(0) -} - -func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error { - ret := m.Called(t, result, dc, token, key) - return ret.Error(0) -} - -func (m *mockCache) sendNotification(ctx context.Context, key string, u cache.UpdateEvent) bool { - m.lock.Lock() - defer m.lock.Unlock() - - watchers, ok := m.watchers[key] - if !ok || len(m.watchers) < 1 { - return false - } - - var newWatchers []mockWatcher - - for _, watcher := range watchers { - select { - case watcher.ch <- u: - newWatchers = append(newWatchers, watcher) - case <-watcher.done: - // do nothing, this watcher will be removed from the list - case <-ctx.Done(): - // return doesn't matter here really, the test is being cancelled - return true - } - } - - // this removes any already cancelled watches from being sent to - m.watchers[key] = newWatchers - - return true -} - -func newMockCache(t *testing.T) *mockCache { - mcache := mockCache{watchers: make(map[string][]mockWatcher)} - mcache.Test(t) - return &mcache -} - -func waitForChan(timer *time.Timer, ch <-chan struct{}) bool { - select { - case <-timer.C: - return false - case <-ch: - return true - } -} - -func waitForChans(timeout time.Duration, chans ...<-chan struct{}) bool { - timer := time.NewTimer(timeout) - defer timer.Stop() - - for _, ch := range chans { - if !waitForChan(timer, ch) { - return false - } - } - return true -} - -func testTLSConfigurator(t *testing.T) *tlsutil.Configurator { - t.Helper() - logger := testutil.Logger(t) - cfg, err := tlsutil.NewConfigurator(tlsutil.Config{AutoTLS: 
true}, logger) - require.NoError(t, err) - return cfg -} - -func newLeaf(t *testing.T, ca *structs.CARoot, idx uint64, expiration time.Duration) *structs.IssuedCert { - t.Helper() - - pub, priv, err := connect.TestAgentLeaf(t, "node", "foo", ca, expiration) - require.NoError(t, err) - cert, err := connect.ParseCert(pub) - require.NoError(t, err) - - spiffeID, err := connect.ParseCertURI(cert.URIs[0]) - require.NoError(t, err) - - agentID, ok := spiffeID.(*connect.SpiffeIDAgent) - require.True(t, ok, "certificate doesn't have an agent leaf cert URI") - - return &structs.IssuedCert{ - SerialNumber: cert.SerialNumber.String(), - CertPEM: pub, - PrivateKeyPEM: priv, - ValidAfter: cert.NotBefore, - ValidBefore: cert.NotAfter, - Agent: agentID.Agent, - AgentURI: agentID.URI().String(), - EnterpriseMeta: *structs.DefaultEnterpriseMeta(), - RaftIndex: structs.RaftIndex{ - CreateIndex: idx, - ModifyIndex: idx, - }, - } -} - -type testCertMonitor struct { - monitor *CertMonitor - mcache *mockCache - tls *tlsutil.Configurator - tokens *token.Store - fallback *mockFallback - persist *mockPersist - - extraCACerts []string - initialCert *structs.IssuedCert - initialRoots *structs.IndexedCARoots - - // these are some variables that the CertMonitor was created with - datacenter string - nodeName string - dns []string - ips []net.IP - verifyServerHostname bool -} - -func newTestCertMonitor(t *testing.T) testCertMonitor { - t.Helper() - - tlsConfigurator := testTLSConfigurator(t) - tokens := new(token.Store) - - id, err := uuid.GenerateUUID() - require.NoError(t, err) - tokens.UpdateAgentToken(id, token.TokenSourceConfig) - - ca := connect.TestCA(t, nil) - manualCA := connect.TestCA(t, nil) - // this cert is setup to not expire quickly. this will prevent - // the test from accidentally running the fallback routine - // before we want to force that to happen. 
- issued := newLeaf(t, ca, 1, 10*time.Minute) - - indexedRoots := structs.IndexedCARoots{ - ActiveRootID: ca.ID, - TrustDomain: connect.TestClusterID, - Roots: []*structs.CARoot{ - ca, - }, - QueryMeta: structs.QueryMeta{ - Index: 1, - }, - } - - initialCerts := &structs.SignedResponse{ - ConnectCARoots: indexedRoots, - IssuedCert: *issued, - ManualCARoots: []string{manualCA.RootCert}, - VerifyServerHostname: true, - } - - dnsSANs := []string{"test.dev"} - ipSANs := []net.IP{net.IPv4(198, 18, 0, 1)} - - fallback := &mockFallback{} - fallback.Test(t) - persist := &mockPersist{} - persist.Test(t) - - mcache := newMockCache(t) - rootRes := cache.FetchResult{Value: &indexedRoots, Index: 1} - rootsReq := structs.DCSpecificRequest{Datacenter: "foo"} - mcache.On("Prepopulate", cachetype.ConnectCARootName, rootRes, "foo", "", rootsReq.CacheInfo().Key).Return(nil).Once() - - leafReq := cachetype.ConnectCALeafRequest{ - Token: tokens.AgentToken(), - Agent: "node", - Datacenter: "foo", - DNSSAN: dnsSANs, - IPSAN: ipSANs, - } - leafRes := cache.FetchResult{ - Value: issued, - Index: 1, - State: cachetype.ConnectCALeafSuccess(ca.SigningKeyID), - } - mcache.On("Prepopulate", cachetype.ConnectCALeafName, leafRes, "foo", tokens.AgentToken(), leafReq.Key()).Return(nil).Once() - - // we can assert more later but this should always be done. - defer mcache.AssertExpectations(t) - - cfg := new(Config). - WithCache(mcache). - WithLogger(testutil.Logger(t)). - WithTLSConfigurator(tlsConfigurator). - WithTokens(tokens). - WithFallback(fallback.fallback). - WithDNSSANs(dnsSANs). - WithIPSANs(ipSANs). - WithDatacenter("foo"). - WithNodeName("node"). - WithFallbackLeeway(time.Nanosecond). - WithFallbackRetry(time.Millisecond). 
- WithPersistence(persist.persist) - - monitor, err := New(cfg) - require.NoError(t, err) - require.NotNil(t, monitor) - - require.NoError(t, monitor.Update(initialCerts)) - - return testCertMonitor{ - monitor: monitor, - tls: tlsConfigurator, - tokens: tokens, - mcache: mcache, - persist: persist, - fallback: fallback, - extraCACerts: []string{manualCA.RootCert}, - initialCert: issued, - initialRoots: &indexedRoots, - datacenter: "foo", - nodeName: "node", - dns: dnsSANs, - ips: ipSANs, - verifyServerHostname: true, - } -} - -func tlsCertificateFromIssued(t *testing.T, issued *structs.IssuedCert) *tls.Certificate { - t.Helper() - - cert, err := tls.X509KeyPair([]byte(issued.CertPEM), []byte(issued.PrivateKeyPEM)) - require.NoError(t, err) - return &cert -} - -// convenience method to get a TLS Certificate from the intial issued certificate and priv key -func (cm *testCertMonitor) initialTLSCertificate(t *testing.T) *tls.Certificate { - t.Helper() - return tlsCertificateFromIssued(t, cm.initialCert) -} - -// just a convenience method to get a list of all the CA pems that we set up regardless -// of manual vs connect. 
-func (cm *testCertMonitor) initialCACerts() []string { - pems := cm.extraCACerts - for _, root := range cm.initialRoots.Roots { - pems = append(pems, root.RootCert) - } - return pems -} - -func (cm *testCertMonitor) assertExpectations(t *testing.T) { - cm.mcache.AssertExpectations(t) - cm.fallback.AssertExpectations(t) - cm.persist.AssertExpectations(t) -} - -func TestCertMonitor_InitialCerts(t *testing.T) { - // this also ensures that the cache was prepopulated properly - cm := newTestCertMonitor(t) - - // verify that the certificate was injected into the TLS configurator correctly - require.Equal(t, cm.initialTLSCertificate(t), cm.tls.Cert()) - // verify that the CA certs (both Connect and manual ones) were injected correctly - require.ElementsMatch(t, cm.initialCACerts(), cm.tls.CAPems()) - // verify that the auto-tls verify server hostname setting was injected correctly - require.Equal(t, cm.verifyServerHostname, cm.tls.VerifyServerHostname()) -} - -func TestCertMonitor_GoRoutineManagement(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - cm := newTestCertMonitor(t) - - // ensure that the monitor is not running - require.False(t, cm.monitor.IsRunning()) - - // ensure that nothing bad happens and that it reports as stopped - require.False(t, cm.monitor.Stop()) - - // we will never send notifications so these just ignore everything - cm.mcache.On("Notify", cachetype.ConnectCARootName, &structs.DCSpecificRequest{Datacenter: cm.datacenter}, rootsWatchID).Return(nil).Times(2) - cm.mcache.On("Notify", cachetype.ConnectCALeafName, - &cachetype.ConnectCALeafRequest{ - Token: cm.tokens.AgentToken(), - Datacenter: cm.datacenter, - Agent: cm.nodeName, - DNSSAN: cm.dns, - IPSAN: cm.ips, - }, - leafWatchID, - ).Return(nil).Times(2) - - done, err := cm.monitor.Start(ctx) - require.NoError(t, err) - require.True(t, cm.monitor.IsRunning()) - _, err = cm.monitor.Start(ctx) - testutil.RequireErrorContains(t, err, "the CertMonitor is 
already running") - require.True(t, cm.monitor.Stop()) - - require.True(t, waitForChans(100*time.Millisecond, done), "monitor didn't shut down") - require.False(t, cm.monitor.IsRunning()) - done, err = cm.monitor.Start(ctx) - require.NoError(t, err) - - // ensure that context cancellation causes us to stop as well - cancel() - require.True(t, waitForChans(100*time.Millisecond, done)) - - cm.assertExpectations(t) -} - -func startedCertMonitor(t *testing.T) (context.Context, testCertMonitor) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - cm := newTestCertMonitor(t) - - rootsCtx, rootsCancel := context.WithCancel(ctx) - defer rootsCancel() - leafCtx, leafCancel := context.WithCancel(ctx) - defer leafCancel() - - // initial roots watch - cm.mcache.On("Notify", cachetype.ConnectCARootName, - &structs.DCSpecificRequest{ - Datacenter: cm.datacenter, - }, - rootsWatchID). - Return(nil). - Once(). - Run(func(_ mock.Arguments) { - rootsCancel() - }) - // the initial watch after starting the monitor - cm.mcache.On("Notify", cachetype.ConnectCALeafName, - &cachetype.ConnectCALeafRequest{ - Token: cm.tokens.AgentToken(), - Datacenter: cm.datacenter, - Agent: cm.nodeName, - DNSSAN: cm.dns, - IPSAN: cm.ips, - }, - leafWatchID). - Return(nil). - Once(). 
- Run(func(_ mock.Arguments) { - leafCancel() - }) - - done, err := cm.monitor.Start(ctx) - require.NoError(t, err) - // this prevents logs after the test finishes - t.Cleanup(func() { - cm.monitor.Stop() - <-done - }) - - require.True(t, - waitForChans(100*time.Millisecond, rootsCtx.Done(), leafCtx.Done()), - "not all watches were started within the alotted time") - - return ctx, cm -} - -// This test ensures that the cache watches are restarted with the updated -// token after receiving a token update -func TestCertMonitor_TokenUpdate(t *testing.T) { - ctx, cm := startedCertMonitor(t) - - rootsCtx, rootsCancel := context.WithCancel(ctx) - defer rootsCancel() - leafCtx, leafCancel := context.WithCancel(ctx) - defer leafCancel() - - newToken := "8e4fe8db-162d-42d8-81ca-710fb2280ad0" - - // we expect a new roots watch because when the leaf cert watch is restarted so is the root cert watch - cm.mcache.On("Notify", cachetype.ConnectCARootName, - &structs.DCSpecificRequest{ - Datacenter: cm.datacenter, - }, - rootsWatchID). - Return(nil). - Once(). - Run(func(_ mock.Arguments) { - rootsCancel() - }) - - secondWatch := &cachetype.ConnectCALeafRequest{ - Token: newToken, - Datacenter: cm.datacenter, - Agent: cm.nodeName, - DNSSAN: cm.dns, - IPSAN: cm.ips, - } - // the new watch after updating the token - cm.mcache.On("Notify", cachetype.ConnectCALeafName, secondWatch, leafWatchID). - Return(nil). - Once(). 
- Run(func(args mock.Arguments) { - leafCancel() - }) - - cm.tokens.UpdateAgentToken(newToken, token.TokenSourceAPI) - - require.True(t, - waitForChans(100*time.Millisecond, rootsCtx.Done(), leafCtx.Done()), - "not all watches were restarted within the alotted time") - - cm.assertExpectations(t) -} - -func TestCertMonitor_RootsUpdate(t *testing.T) { - ctx, cm := startedCertMonitor(t) - - secondCA := connect.TestCA(t, cm.initialRoots.Roots[0]) - secondRoots := structs.IndexedCARoots{ - ActiveRootID: secondCA.ID, - TrustDomain: connect.TestClusterID, - Roots: []*structs.CARoot{ - secondCA, - cm.initialRoots.Roots[0], - }, - QueryMeta: structs.QueryMeta{ - Index: 99, - }, - } - - cm.persist.On("persist", &structs.SignedResponse{ - IssuedCert: *cm.initialCert, - ManualCARoots: cm.extraCACerts, - ConnectCARoots: secondRoots, - VerifyServerHostname: cm.verifyServerHostname, - }).Return(nil).Once() - - // assert value of the CA certs prior to updating - require.ElementsMatch(t, cm.initialCACerts(), cm.tls.CAPems()) - - req := structs.DCSpecificRequest{Datacenter: cm.datacenter} - require.True(t, cm.mcache.sendNotification(ctx, req.CacheInfo().Key, cache.UpdateEvent{ - CorrelationID: rootsWatchID, - Result: &secondRoots, - Meta: cache.ResultMeta{ - Index: secondRoots.Index, - }, - })) - - expectedCAs := append(cm.extraCACerts, secondCA.RootCert, cm.initialRoots.Roots[0].RootCert) - - // this will wait up to 200ms (8 x 25 ms waits between the 9 requests) - retry.RunWith(&retry.Counter{Count: 9, Wait: 25 * time.Millisecond}, t, func(r *retry.R) { - require.ElementsMatch(r, expectedCAs, cm.tls.CAPems()) - }) - - cm.assertExpectations(t) -} - -func TestCertMonitor_CertUpdate(t *testing.T) { - ctx, cm := startedCertMonitor(t) - - secondCert := newLeaf(t, cm.initialRoots.Roots[0], 100, 10*time.Minute) - - cm.persist.On("persist", &structs.SignedResponse{ - IssuedCert: *secondCert, - ManualCARoots: cm.extraCACerts, - ConnectCARoots: *cm.initialRoots, - VerifyServerHostname: 
cm.verifyServerHostname, - }).Return(nil).Once() - - // assert value of cert prior to updating the leaf - require.Equal(t, cm.initialTLSCertificate(t), cm.tls.Cert()) - - key := cm.monitor.leafReq.CacheInfo().Key - - // send the new certificate - this notifies only the watchers utilizing - // the new ACL token - require.True(t, cm.mcache.sendNotification(ctx, key, cache.UpdateEvent{ - CorrelationID: leafWatchID, - Result: secondCert, - Meta: cache.ResultMeta{ - Index: secondCert.ModifyIndex, - }, - })) - - tlsCert := tlsCertificateFromIssued(t, secondCert) - - // this will wait up to 200ms (8 x 25 ms waits between the 9 requests) - retry.RunWith(&retry.Counter{Count: 9, Wait: 25 * time.Millisecond}, t, func(r *retry.R) { - require.Equal(r, tlsCert, cm.tls.Cert()) - }) - - cm.assertExpectations(t) -} - -func TestCertMonitor_Fallback(t *testing.T) { - ctx, cm := startedCertMonitor(t) - - // at this point everything is operating normally and the monitor is just - // waiting for events. We are going to send a new cert that is basically - // already expired and then allow the fallback routine to kick in. 
- secondCert := newLeaf(t, cm.initialRoots.Roots[0], 100, time.Nanosecond) - secondCA := connect.TestCA(t, cm.initialRoots.Roots[0]) - secondRoots := structs.IndexedCARoots{ - ActiveRootID: secondCA.ID, - TrustDomain: connect.TestClusterID, - Roots: []*structs.CARoot{ - secondCA, - cm.initialRoots.Roots[0], - }, - QueryMeta: structs.QueryMeta{ - Index: 101, - }, - } - thirdCert := newLeaf(t, secondCA, 102, 10*time.Minute) - - // inject a fallback routine error to check that we rerun it quickly - cm.fallback.On("fallback").Return(nil, fmt.Errorf("induced error")).Once() - - fallbackResp := &structs.SignedResponse{ - ConnectCARoots: secondRoots, - IssuedCert: *thirdCert, - ManualCARoots: cm.extraCACerts, - VerifyServerHostname: true, - } - // expect the fallback routine to be executed and setup the return - cm.fallback.On("fallback").Return(fallbackResp, nil).Once() - - cm.persist.On("persist", &structs.SignedResponse{ - IssuedCert: *secondCert, - ConnectCARoots: *cm.initialRoots, - ManualCARoots: cm.extraCACerts, - VerifyServerHostname: cm.verifyServerHostname, - }).Return(nil).Once() - - cm.persist.On("persist", fallbackResp).Return(nil).Once() - - // Add another roots cache prepopulation expectation which should happen - // in response to executing the fallback mechanism - rootRes := cache.FetchResult{Value: &secondRoots, Index: 101} - rootsReq := structs.DCSpecificRequest{Datacenter: cm.datacenter} - cm.mcache.On("Prepopulate", cachetype.ConnectCARootName, rootRes, cm.datacenter, "", rootsReq.CacheInfo().Key).Return(nil).Once() - - // add another leaf cert cache prepopulation expectation which should happen - // in response to executing the fallback mechanism - leafReq := cachetype.ConnectCALeafRequest{ - Token: cm.tokens.AgentToken(), - Agent: cm.nodeName, - Datacenter: cm.datacenter, - DNSSAN: cm.dns, - IPSAN: cm.ips, - } - leafRes := cache.FetchResult{ - Value: thirdCert, - Index: 101, - State: cachetype.ConnectCALeafSuccess(secondCA.SigningKeyID), - } - 
cm.mcache.On("Prepopulate", cachetype.ConnectCALeafName, leafRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()).Return(nil).Once() - - // nothing in the monitor should be looking at this as its only done - // in response to sending token updates, no need to synchronize - key := cm.monitor.leafReq.CacheInfo().Key - // send the new certificate - this notifies only the watchers utilizing - // the new ACL token - require.True(t, cm.mcache.sendNotification(ctx, key, cache.UpdateEvent{ - CorrelationID: leafWatchID, - Result: secondCert, - Meta: cache.ResultMeta{ - Index: secondCert.ModifyIndex, - }, - })) - - // if all went well we would have updated the first certificate which was pretty much expired - // causing the fallback handler to be invoked almost immediately. The fallback routine will - // return the response containing the third cert and second CA roots so now we should wait - // a little while and ensure they were applied to the TLS Configurator - tlsCert := tlsCertificateFromIssued(t, thirdCert) - expectedCAs := append(cm.extraCACerts, secondCA.RootCert, cm.initialRoots.Roots[0].RootCert) - - // this will wait up to 200ms (8 x 25 ms waits between the 9 requests) - retry.RunWith(&retry.Counter{Count: 9, Wait: 25 * time.Millisecond}, t, func(r *retry.R) { - require.Equal(r, tlsCert, cm.tls.Cert()) - require.ElementsMatch(r, expectedCAs, cm.tls.CAPems()) - }) - - cm.assertExpectations(t) -} - -func TestCertMonitor_New_Errors(t *testing.T) { - type testCase struct { - cfg Config - err string - } - - fallback := func(_ context.Context) (*structs.SignedResponse, error) { - return nil, fmt.Errorf("Unimplemented") - } - - tokens := new(token.Store) - - cases := map[string]testCase{ - "no-cache": { - cfg: Config{ - TLSConfigurator: testTLSConfigurator(t), - Fallback: fallback, - Tokens: tokens, - Datacenter: "foo", - NodeName: "bar", - }, - err: "CertMonitor creation requires a Cache", - }, - "no-tls-configurator": { - cfg: Config{ - Cache: 
cache.New(cache.Options{}), - Fallback: fallback, - Tokens: tokens, - Datacenter: "foo", - NodeName: "bar", - }, - err: "CertMonitor creation requires a TLS Configurator", - }, - "no-fallback": { - cfg: Config{ - Cache: cache.New(cache.Options{}), - TLSConfigurator: testTLSConfigurator(t), - Tokens: tokens, - Datacenter: "foo", - NodeName: "bar", - }, - err: "CertMonitor creation requires specifying a FallbackFunc", - }, - "no-tokens": { - cfg: Config{ - Cache: cache.New(cache.Options{}), - TLSConfigurator: testTLSConfigurator(t), - Fallback: fallback, - Datacenter: "foo", - NodeName: "bar", - }, - err: "CertMonitor creation requires specifying a token store", - }, - "no-datacenter": { - cfg: Config{ - Cache: cache.New(cache.Options{}), - TLSConfigurator: testTLSConfigurator(t), - Fallback: fallback, - Tokens: tokens, - NodeName: "bar", - }, - err: "CertMonitor creation requires specifying the datacenter", - }, - "no-node-name": { - cfg: Config{ - Cache: cache.New(cache.Options{}), - TLSConfigurator: testTLSConfigurator(t), - Fallback: fallback, - Tokens: tokens, - Datacenter: "foo", - }, - err: "CertMonitor creation requires specifying the agent's node name", - }, - } - - for name, tcase := range cases { - t.Run(name, func(t *testing.T) { - monitor, err := New(&tcase.cfg) - testutil.RequireErrorContains(t, err, tcase.err) - require.Nil(t, monitor) - }) - } -} diff --git a/agent/cert-monitor/config.go b/agent/cert-monitor/config.go deleted file mode 100644 index 2e4bcc57c..000000000 --- a/agent/cert-monitor/config.go +++ /dev/null @@ -1,150 +0,0 @@ -package certmon - -import ( - "context" - "net" - "time" - - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/agent/token" - "github.com/hashicorp/consul/tlsutil" - "github.com/hashicorp/go-hclog" -) - -// FallbackFunc is used when the normal cache watch based Certificate -// updating fails to update the Certificate in time and a different -// method of updating the certificate is required. 
-type FallbackFunc func(context.Context) (*structs.SignedResponse, error) - -// PersistFunc is used to persist the data from a signed response -type PersistFunc func(*structs.SignedResponse) error - -type Config struct { - // Logger is the logger to be used while running. If not set - // then no logging will be performed. - Logger hclog.Logger - - // TLSConfigurator is where the certificates and roots are set when - // they are updated. This field is required. - TLSConfigurator *tlsutil.Configurator - - // Cache is an object implementing our Cache interface. The Cache - // used at runtime must be able to handle Roots and Leaf Cert watches - Cache Cache - - // Tokens is the shared token store. It is used to retrieve the current - // agent token as well as getting notifications when that token is updated. - // This field is required. - Tokens *token.Store - - // Persist is a function to run when there are new certs or keys - Persist PersistFunc - - // Fallback is a function to run when the normal cache updating of the - // agent's certificates has failed to work for one reason or another. - // This field is required. - Fallback FallbackFunc - - // FallbackLeeway is the amount of time after certificate expiration before - // invoking the fallback routine. If not set this will default to 10s. - FallbackLeeway time.Duration - - // FallbackRetry is the duration between Fallback invocations when the configured - // fallback routine returns an error. If not set this will default to 1m. - FallbackRetry time.Duration - - // DNSSANs is a list of DNS SANs that certificate requests should include. This - // field is optional and no extra DNS SANs will be requested if unset. 'localhost' - // is unconditionally requested by the cache implementation. - DNSSANs []string - - // IPSANs is a list of IP SANs to include in the certificate signing request. This - // field is optional and no extra IP SANs will be requested if unset. 
Both '127.0.0.1' - // and '::1' IP SANs are unconditionally requested by the cache implementation. - IPSANs []net.IP - - // Datacenter is the datacenter to request certificates within. This filed is required - Datacenter string - - // NodeName is the agent's node name to use when requesting certificates. This field - // is required. - NodeName string -} - -// WithCache will cause the created CertMonitor type to use the provided Cache -func (cfg *Config) WithCache(cache Cache) *Config { - cfg.Cache = cache - return cfg -} - -// WithLogger will cause the created CertMonitor type to use the provided logger -func (cfg *Config) WithLogger(logger hclog.Logger) *Config { - cfg.Logger = logger - return cfg -} - -// WithTLSConfigurator will cause the created CertMonitor type to use the provided configurator -func (cfg *Config) WithTLSConfigurator(tlsConfigurator *tlsutil.Configurator) *Config { - cfg.TLSConfigurator = tlsConfigurator - return cfg -} - -// WithTokens will cause the created CertMonitor type to use the provided token store -func (cfg *Config) WithTokens(tokens *token.Store) *Config { - cfg.Tokens = tokens - return cfg -} - -// WithFallback configures a fallback function to use if the normal update mechanisms -// fail to renew the certificate in time. 
-func (cfg *Config) WithFallback(fallback FallbackFunc) *Config { - cfg.Fallback = fallback - return cfg -} - -// WithDNSSANs configures the CertMonitor to request these DNS SANs when requesting a new -// certificate -func (cfg *Config) WithDNSSANs(sans []string) *Config { - cfg.DNSSANs = sans - return cfg -} - -// WithIPSANs configures the CertMonitor to request these IP SANs when requesting a new -// certificate -func (cfg *Config) WithIPSANs(sans []net.IP) *Config { - cfg.IPSANs = sans - return cfg -} - -// WithDatacenter configures the CertMonitor to request Certificates in this DC -func (cfg *Config) WithDatacenter(dc string) *Config { - cfg.Datacenter = dc - return cfg -} - -// WithNodeName configures the CertMonitor to request Certificates with this agent name -func (cfg *Config) WithNodeName(name string) *Config { - cfg.NodeName = name - return cfg -} - -// WithFallbackLeeway configures how long after a certificate expires before attempting to -// generarte a new certificate using the fallback mechanism. The default is 10s. -func (cfg *Config) WithFallbackLeeway(leeway time.Duration) *Config { - cfg.FallbackLeeway = leeway - return cfg -} - -// WithFallbackRetry controls how quickly we will make subsequent invocations of -// the fallback func in the case of it erroring out. -func (cfg *Config) WithFallbackRetry(after time.Duration) *Config { - cfg.FallbackRetry = after - return cfg -} - -// WithPersistence will configure the CertMonitor to use this callback for persisting -// a new TLS configuration. 
-func (cfg *Config) WithPersistence(persist PersistFunc) *Config { - cfg.Persist = persist - return cfg -} diff --git a/agent/consul/auto_encrypt.go b/agent/consul/auto_encrypt.go deleted file mode 100644 index 0684e7f71..000000000 --- a/agent/consul/auto_encrypt.go +++ /dev/null @@ -1,239 +0,0 @@ -package consul - -import ( - "context" - "fmt" - "net" - "strings" - "time" - - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib" - "github.com/hashicorp/go-hclog" - "github.com/miekg/dns" -) - -const ( - dummyTrustDomain = "dummy.trustdomain" - retryJitterWindow = 30 * time.Second -) - -func (c *Client) autoEncryptCSR(extraDNSSANs []string, extraIPSANs []net.IP) (string, string, error) { - // We don't provide the correct host here, because we don't know any - // better at this point. Apart from the domain, we would need the - // ClusterID, which we don't have. This is why we go with - // dummyTrustDomain the first time. Subsequent CSRs will have the - // correct TrustDomain. - id := &connect.SpiffeIDAgent{ - Host: dummyTrustDomain, - Datacenter: c.config.Datacenter, - Agent: c.config.NodeName, - } - - conf, err := c.config.CAConfig.GetCommonConfig() - if err != nil { - return "", "", err - } - - if conf.PrivateKeyType == "" { - conf.PrivateKeyType = connect.DefaultPrivateKeyType - } - if conf.PrivateKeyBits == 0 { - conf.PrivateKeyBits = connect.DefaultPrivateKeyBits - } - - // Create a new private key - pk, pkPEM, err := connect.GeneratePrivateKeyWithConfig(conf.PrivateKeyType, conf.PrivateKeyBits) - if err != nil { - return "", "", err - } - - dnsNames := append([]string{"localhost"}, extraDNSSANs...) - ipAddresses := append([]net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, extraIPSANs...) - - // Create a CSR. - // - // The Common Name includes the dummy trust domain for now but Server will - // override this when it is signed anyway so it's OK. 
- cn := connect.AgentCN(c.config.NodeName, dummyTrustDomain) - csr, err := connect.CreateCSR(id, cn, pk, dnsNames, ipAddresses) - if err != nil { - return "", "", err - } - - return pkPEM, csr, nil -} - -func (c *Client) RequestAutoEncryptCerts(ctx context.Context, servers []string, port int, token string, extraDNSSANs []string, extraIPSANs []net.IP) (*structs.SignedResponse, error) { - errFn := func(err error) (*structs.SignedResponse, error) { - return nil, err - } - - // Check if we know about a server already through gossip. Depending on - // how the agent joined, there might already be one. Also in case this - // gets called because the cert expired. - server := c.router.FindLANServer() - if server != nil { - servers = []string{server.Addr.String()} - } - - if len(servers) == 0 { - return errFn(fmt.Errorf("No servers to request AutoEncrypt.Sign")) - } - - pkPEM, csr, err := c.autoEncryptCSR(extraDNSSANs, extraIPSANs) - if err != nil { - return errFn(err) - } - - // Prepare request and response so that it can be passed to - // RPCInsecure. - args := structs.CASignRequest{ - WriteRequest: structs.WriteRequest{Token: token}, - Datacenter: c.config.Datacenter, - CSR: csr, - } - var reply structs.SignedResponse - - // Retry implementation modeled after https://github.com/hashicorp/consul/pull/5228. - // TLDR; there is a 30s window from which a random time is picked. - // Repeat until the call is successful. - attempts := 0 - for { - select { - case <-ctx.Done(): - return errFn(fmt.Errorf("aborting AutoEncrypt because interrupted: %w", ctx.Err())) - default: - } - - // Translate host to net.TCPAddr to make life easier for - // RPCInsecure. 
- for _, s := range servers { - ips, err := resolveAddr(s, c.logger) - if err != nil { - c.logger.Warn("AutoEncrypt resolveAddr failed", "error", err) - continue - } - - for _, ip := range ips { - addr := net.TCPAddr{IP: ip, Port: port} - - if err = c.connPool.RPC(c.config.Datacenter, c.config.NodeName, &addr, "AutoEncrypt.Sign", &args, &reply); err == nil { - reply.IssuedCert.PrivateKeyPEM = pkPEM - return &reply, nil - } else { - c.logger.Warn("AutoEncrypt failed", "error", err) - } - } - } - attempts++ - - delay := lib.RandomStagger(retryJitterWindow) - interval := (time.Duration(attempts) * delay) + delay - c.logger.Warn("retrying AutoEncrypt", "retry_interval", interval) - select { - case <-time.After(interval): - continue - case <-ctx.Done(): - return errFn(fmt.Errorf("aborting AutoEncrypt because interrupted: %w", ctx.Err())) - case <-c.shutdownCh: - return errFn(fmt.Errorf("aborting AutoEncrypt because shutting down")) - } - } -} - -func missingPortError(host string, err error) bool { - return err != nil && err.Error() == fmt.Sprintf("address %s: missing port in address", host) -} - -// resolveAddr is used to resolve the host into IPs and error. -func resolveAddr(rawHost string, logger hclog.Logger) ([]net.IP, error) { - host, _, err := net.SplitHostPort(rawHost) - if err != nil { - // In case we encounter this error, we proceed with the - // rawHost. This is fine since -start-join and -retry-join - // take only hosts anyways and this is an expected case. - if missingPortError(rawHost, err) { - host = rawHost - } else { - return nil, err - } - } - - if ip := net.ParseIP(host); ip != nil { - return []net.IP{ip}, nil - } - - // First try TCP so we have the best chance for the largest list of - // hosts to join. If this fails it's not fatal since this isn't a standard - // way to query DNS, and we have a fallback below. 
- if ips, err := tcpLookupIP(host, logger); err != nil { - logger.Debug("TCP-first lookup failed for host, falling back to UDP", "host", host, "error", err) - } else if len(ips) > 0 { - return ips, nil - } - - // If TCP didn't yield anything then use the normal Go resolver which - // will try UDP, then might possibly try TCP again if the UDP response - // indicates it was truncated. - ips, err := net.LookupIP(host) - if err != nil { - return nil, err - } - return ips, nil -} - -// tcpLookupIP is a helper to initiate a TCP-based DNS lookup for the given host. -// The built-in Go resolver will do a UDP lookup first, and will only use TCP if -// the response has the truncate bit set, which isn't common on DNS servers like -// Consul's. By doing the TCP lookup directly, we get the best chance for the -// largest list of hosts to join. Since joins are relatively rare events, it's ok -// to do this rather expensive operation. -func tcpLookupIP(host string, logger hclog.Logger) ([]net.IP, error) { - // Don't attempt any TCP lookups against non-fully qualified domain - // names, since those will likely come from the resolv.conf file. - if !strings.Contains(host, ".") { - return nil, nil - } - - // Make sure the domain name is terminated with a dot (we know there's - // at least one character at this point). - dn := host - if dn[len(dn)-1] != '.' { - dn = dn + "." - } - - // See if we can find a server to try. - cc, err := dns.ClientConfigFromFile("/etc/resolv.conf") - if err != nil { - return nil, err - } - if len(cc.Servers) > 0 { - // Do the lookup. - c := new(dns.Client) - c.Net = "tcp" - msg := new(dns.Msg) - msg.SetQuestion(dn, dns.TypeANY) - in, _, err := c.Exchange(msg, cc.Servers[0]) - if err != nil { - return nil, err - } - - // Handle any IPs we get back that we can attempt to join. 
- var ips []net.IP - for _, r := range in.Answer { - switch rr := r.(type) { - case (*dns.A): - ips = append(ips, rr.A) - case (*dns.AAAA): - ips = append(ips, rr.AAAA) - case (*dns.CNAME): - logger.Debug("Ignoring CNAME RR in TCP-first answer for host", "host", host) - } - } - return ips, nil - } - - return nil, nil -} diff --git a/agent/consul/auto_encrypt_test.go b/agent/consul/auto_encrypt_test.go deleted file mode 100644 index 8dd04e416..000000000 --- a/agent/consul/auto_encrypt_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package consul - -import ( - "context" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "net" - "net/url" - "os" - "testing" - "time" - - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" -) - -func TestAutoEncrypt_resolveAddr(t *testing.T) { - type args struct { - rawHost string - logger hclog.Logger - } - logger := testutil.Logger(t) - - tests := []struct { - name string - args args - ips []net.IP - wantErr bool - }{ - { - name: "host without port", - args: args{ - "127.0.0.1", - logger, - }, - ips: []net.IP{net.IPv4(127, 0, 0, 1)}, - wantErr: false, - }, - { - name: "host with port", - args: args{ - "127.0.0.1:1234", - logger, - }, - ips: []net.IP{net.IPv4(127, 0, 0, 1)}, - wantErr: false, - }, - { - name: "host with broken port", - args: args{ - "127.0.0.1:xyz", - logger, - }, - ips: []net.IP{net.IPv4(127, 0, 0, 1)}, - wantErr: false, - }, - { - name: "not an address", - args: args{ - "abc", - logger, - }, - ips: nil, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ips, err := resolveAddr(tt.args.rawHost, tt.args.logger) - if (err != nil) != tt.wantErr { - t.Errorf("resolveAddr error: %v, wantErr: %v", err, tt.wantErr) - return - } - require.Equal(t, tt.ips, ips) - }) - } -} - -func TestAutoEncrypt_missingPortError(t *testing.T) { 
- host := "127.0.0.1" - _, _, err := net.SplitHostPort(host) - require.True(t, missingPortError(host, err)) - - host = "127.0.0.1:1234" - _, _, err = net.SplitHostPort(host) - require.False(t, missingPortError(host, err)) -} - -func TestAutoEncrypt_RequestAutoEncryptCerts(t *testing.T) { - dir1, c1 := testClient(t) - defer os.RemoveAll(dir1) - defer c1.Shutdown() - servers := []string{"localhost"} - port := 8301 - token := "" - - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(75*time.Millisecond)) - defer cancel() - - doneCh := make(chan struct{}) - var err error - go func() { - _, err = c1.RequestAutoEncryptCerts(ctx, servers, port, token, nil, nil) - close(doneCh) - }() - select { - case <-doneCh: - // since there are no servers at this port, we shouldn't be - // done and this should be an error of some sorts that happened - // in the setup phase before entering the for loop in - // RequestAutoEncryptCerts. - require.NoError(t, err) - case <-ctx.Done(): - // this is the happy case since auto encrypt is in its loop to - // try to request certs. 
- } -} - -func TestAutoEncrypt_autoEncryptCSR(t *testing.T) { - type testCase struct { - conf *Config - extraDNSSANs []string - extraIPSANs []net.IP - err string - - // to validate the csr - expectedSubject pkix.Name - expectedSigAlg x509.SignatureAlgorithm - expectedPubAlg x509.PublicKeyAlgorithm - expectedDNSNames []string - expectedIPs []net.IP - expectedURIs []*url.URL - } - - cases := map[string]testCase{ - "sans": { - conf: &Config{ - Datacenter: "dc1", - NodeName: "test-node", - CAConfig: &structs.CAConfiguration{}, - }, - extraDNSSANs: []string{"foo.local", "bar.local"}, - extraIPSANs: []net.IP{net.IPv4(198, 18, 0, 1), net.IPv4(198, 18, 0, 2)}, - expectedSubject: pkix.Name{ - CommonName: connect.AgentCN("test-node", dummyTrustDomain), - Names: []pkix.AttributeTypeAndValue{ - { - // 2,5,4,3 is the CommonName type ASN1 identifier - Type: asn1.ObjectIdentifier{2, 5, 4, 3}, - Value: "testnode.agnt.dummy.tr.consul", - }, - }, - }, - expectedSigAlg: x509.ECDSAWithSHA256, - expectedPubAlg: x509.ECDSA, - expectedDNSNames: []string{ - "localhost", - "foo.local", - "bar.local", - }, - expectedIPs: []net.IP{ - {127, 0, 0, 1}, - net.ParseIP("::1"), - {198, 18, 0, 1}, - {198, 18, 0, 2}, - }, - expectedURIs: []*url.URL{ - { - Scheme: "spiffe", - Host: dummyTrustDomain, - Path: "/agent/client/dc/dc1/id/test-node", - }, - }, - }, - } - - for name, tcase := range cases { - t.Run(name, func(t *testing.T) { - client := Client{config: tcase.conf} - - _, csr, err := client.autoEncryptCSR(tcase.extraDNSSANs, tcase.extraIPSANs) - if tcase.err == "" { - require.NoError(t, err) - - request, err := connect.ParseCSR(csr) - require.NoError(t, err) - require.NotNil(t, request) - - require.Equal(t, tcase.expectedSubject, request.Subject) - require.Equal(t, tcase.expectedSigAlg, request.SignatureAlgorithm) - require.Equal(t, tcase.expectedPubAlg, request.PublicKeyAlgorithm) - require.Equal(t, tcase.expectedDNSNames, request.DNSNames) - require.Equal(t, tcase.expectedIPs, 
request.IPAddresses) - require.Equal(t, tcase.expectedURIs, request.URIs) - } else { - require.Error(t, err) - require.Empty(t, csr) - } - }) - } -} diff --git a/agent/setup.go b/agent/setup.go index 4ed282322..b807e7f7e 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -1,7 +1,6 @@ package agent import ( - "context" "fmt" "io" "net" @@ -10,11 +9,9 @@ import ( autoconf "github.com/hashicorp/consul/agent/auto-config" "github.com/hashicorp/consul/agent/cache" - certmon "github.com/hashicorp/consul/agent/cert-monitor" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" @@ -86,38 +83,21 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error) d.Cache = cache.New(cfg.Cache) d.ConnPool = newConnPool(cfg, d.Logger, d.TLSConfigurator) - deferredAC := &deferredAutoConfig{} - d.Router = router.NewRouter(d.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter)) - cmConf := new(certmon.Config). - WithCache(d.Cache). - WithTLSConfigurator(d.TLSConfigurator). - WithDNSSANs(cfg.AutoConfig.DNSSANs). - WithIPSANs(cfg.AutoConfig.IPSANs). - WithDatacenter(cfg.Datacenter). - WithNodeName(cfg.NodeName). - WithFallback(deferredAC.autoConfigFallbackTLS). - WithLogger(d.Logger.Named(logging.AutoConfig)). - WithTokens(d.Tokens). 
- WithPersistence(deferredAC.autoConfigPersist) - acCertMon, err := certmon.New(cmConf) - if err != nil { - return d, err - } - acConf := autoconf.Config{ - DirectRPC: d.ConnPool, - Logger: d.Logger, - CertMonitor: acCertMon, - Loader: configLoader, + DirectRPC: d.ConnPool, + Logger: d.Logger, + Loader: configLoader, + ServerProvider: d.Router, + TLSConfigurator: d.TLSConfigurator, + Cache: d.Cache, + Tokens: d.Tokens, } d.AutoConfig, err = autoconf.New(acConf) if err != nil { return d, err } - // TODO: can this cyclic dependency be un-cycled? - deferredAC.autoConf = d.AutoConfig return d, nil } @@ -144,21 +124,3 @@ func newConnPool(config *config.RuntimeConfig, logger hclog.Logger, tls *tlsutil } return pool } - -type deferredAutoConfig struct { - autoConf *autoconf.AutoConfig // TODO: use an interface -} - -func (a *deferredAutoConfig) autoConfigFallbackTLS(ctx context.Context) (*structs.SignedResponse, error) { - if a.autoConf == nil { - return nil, fmt.Errorf("AutoConfig manager has not been created yet") - } - return a.autoConf.FallbackTLS(ctx) -} - -func (a *deferredAutoConfig) autoConfigPersist(resp *structs.SignedResponse) error { - if a.autoConf == nil { - return fmt.Errorf("AutoConfig manager has not been created yet") - } - return a.autoConf.RecordUpdatedCerts(resp) -} diff --git a/proto/translate.go b/proto/translate.go index 3619a0e6e..6ee90c084 100644 --- a/proto/translate.go +++ b/proto/translate.go @@ -12,6 +12,8 @@ var ( timePtrType = reflect.TypeOf((*time.Time)(nil)) timeType = timePtrType.Elem() mapStrInf = reflect.TypeOf((map[string]interface{})(nil)) + + epoch1970 = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) ) // HookPBTimestampToTime is a mapstructure decode hook to translate a protobuf timestamp @@ -19,7 +21,10 @@ var ( func HookPBTimestampToTime(from, to reflect.Type, data interface{}) (interface{}, error) { if to == timeType && from == tsType { ts := data.(*types.Timestamp) - return time.Unix(ts.Seconds, int64(ts.Nanos)), nil + if 
ts.Seconds == 0 && ts.Nanos == 0 { + return time.Time{}, nil + } + return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC(), nil } return data, nil @@ -39,6 +44,13 @@ func HookTimeToPBTimestamp(from, to reflect.Type, data interface{}) (interface{} // seeing a *time.Time instead of a time.Time. if from == timePtrType && to == mapStrInf { ts := data.(*time.Time) + + // protobuf only supports times from Jan 1 1970 onward but the time.Time type + // can represent values back to year 1. Basically + if ts.Before(epoch1970) { + return map[string]interface{}{}, nil + } + nanos := ts.UnixNano() if nanos < 0 { return map[string]interface{}{}, nil diff --git a/proto/translate_test.go b/proto/translate_test.go index cd88d8933..0fbfa2b9b 100644 --- a/proto/translate_test.go +++ b/proto/translate_test.go @@ -27,7 +27,7 @@ func TestHookPBTimestampToTime(t *testing.T) { } expected := timeTSWrapper{ - Timestamp: time.Unix(1000, 42), + Timestamp: time.Unix(1000, 42).UTC(), } var actual timeTSWrapper @@ -43,7 +43,7 @@ func TestHookPBTimestampToTime(t *testing.T) { func TestHookTimeToPBTimestamp(t *testing.T) { in := timeTSWrapper{ - Timestamp: time.Unix(999999, 123456), + Timestamp: time.Unix(999999, 123456).UTC(), } expected := pbTSWrapper{ @@ -63,3 +63,24 @@ func TestHookTimeToPBTimestamp(t *testing.T) { require.Equal(t, expected, actual) } + +func TestHookTimeToPBTimestamp_ZeroTime(t *testing.T) { + in := timeTSWrapper{} + + expected := pbTSWrapper{ + Timestamp: &types.Timestamp{ + Seconds: 0, + Nanos: 0, + }, + } + + var actual pbTSWrapper + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: HookTimeToPBTimestamp, + Result: &actual, + }) + require.NoError(t, err) + require.NoError(t, decoder.Decode(in)) + + require.Equal(t, expected, actual) +} From fbae52177541d9595ecba255ea15553e391e86c7 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Mon, 24 Aug 2020 17:46:55 -0400 Subject: [PATCH 12/73] fix TestStore_RegularTokens This test was only passing 
because t.Parallel was causing every subtest to run with the last value in the iteration, which sets a value for all tokens. The test started to fail once t.Parallel was removed, but the same failure could have been produced by adding 'tt := tt' to the t.Run() func. These tests run in under 10ms, so there is no reason to use t.Parallel. --- agent/token/store_test.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/agent/token/store_test.go b/agent/token/store_test.go index f46fcc3a9..6df812257 100644 --- a/agent/token/store_test.go +++ b/agent/token/store_test.go @@ -7,8 +7,6 @@ import ( ) func TestStore_RegularTokens(t *testing.T) { - t.Parallel() - type tokens struct { userSource TokenSource user string @@ -89,13 +87,22 @@ func TestStore_RegularTokens(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() - s := new(Store) - require.True(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) - require.True(t, s.UpdateAgentToken(tt.set.agent, tt.set.agentSource)) - require.True(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) - require.True(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + if tt.set.user != "" { + require.True(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) + } + + if tt.set.agent != "" { + require.True(t, s.UpdateAgentToken(tt.set.agent, tt.set.agentSource)) + } + + if tt.set.repl != "" { + require.True(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) + } + + if tt.set.master != "" { + require.True(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + } // If they don't change then they return false. 
require.False(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) @@ -128,7 +135,6 @@ func TestStore_RegularTokens(t *testing.T) { } func TestStore_AgentMasterToken(t *testing.T) { - t.Parallel() s := new(Store) verify := func(want bool, toks ...string) { @@ -152,7 +158,6 @@ func TestStore_AgentMasterToken(t *testing.T) { } func TestStore_Notify(t *testing.T) { - t.Parallel() s := new(Store) newNotification := func(t *testing.T, s *Store, kind TokenKind) Notifier { From b64ce07ef786df4fce254ba81c74b7587076cd56 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Mon, 17 Aug 2020 19:30:25 -0400 Subject: [PATCH 13/73] agent/token: Move token persistence out of agent And into token.Store. This change isolates any awareness of token persistence in a single place. It is a small step in allowing Agent.New to accept its dependencies. --- agent/acl_test.go | 4 +- agent/agent.go | 103 +-------------- agent/agent_endpoint.go | 102 +++++---------- agent/agent_endpoint_test.go | 16 ++- agent/agent_oss.go | 4 - agent/agent_test.go | 157 ----------------------- agent/config/runtime.go | 5 + agent/setup.go | 1 + agent/token/persistence.go | 190 ++++++++++++++++++++++++++++ agent/token/persistence_test.go | 213 ++++++++++++++++++++++++++++++++ agent/token/store.go | 14 ++- agent/token/store_oss.go | 6 +- 12 files changed, 473 insertions(+), 342 deletions(-) create mode 100644 agent/token/persistence.go create mode 100644 agent/token/persistence_test.go diff --git a/agent/acl_test.go b/agent/acl_test.go index a88d89273..79ade86b2 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -184,7 +184,9 @@ func TestACL_AgentMasterToken(t *testing.T) { t.Parallel() a := NewTestACLAgent(t, t.Name(), TestACLConfig(), nil, nil) - a.loadTokens(a.config) + err := a.tokens.Load(a.config.ACLTokens, a.logger) + require.NoError(t, err) + authz, err := a.resolveToken("towel") require.NotNil(t, authz) require.Nil(t, err) diff --git a/agent/agent.go b/agent/agent.go index 0c639da9a..a56888414 
100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -39,7 +40,6 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/systemd" - "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/agent/xds" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api/watch" @@ -67,9 +67,6 @@ const ( checksDir = "checks" checkStateDir = "checks/state" - // Name of the file tokens will be persisted within - tokensPath = "acl-tokens.json" - // Default reasons for node/service maintenance mode defaultNodeMaintReason = "Maintenance mode is enabled for this node, " + "but no reason was provided. This is a default message." @@ -292,11 +289,6 @@ type Agent struct { // based on the current consul configuration. tlsConfigurator *tlsutil.Configurator - // persistedTokensLock is used to synchronize access to the persisted token - // store within the data directory. This will prevent loading while writing as - // well as multiple concurrent writes. - persistedTokensLock sync.RWMutex - // httpConnLimiter is used to limit connections to the HTTP server by client // IP. httpConnLimiter connlimit.Limiter @@ -370,10 +362,8 @@ func New(bd BaseDeps) (*Agent, error) { // pass the agent itself so its safe to move here. a.registerCache() - // TODO: move to newBaseDeps - // TODO: handle error - a.loadTokens(a.config) - a.loadEnterpriseTokens(a.config) + // TODO: why do we ignore failure to load persisted tokens? 
+ _ = a.tokens.Load(bd.RuntimeConfig.ACLTokens, a.logger) return &a, nil } @@ -3387,90 +3377,6 @@ func (a *Agent) unloadChecks() error { return nil } -type persistedTokens struct { - Replication string `json:"replication,omitempty"` - AgentMaster string `json:"agent_master,omitempty"` - Default string `json:"default,omitempty"` - Agent string `json:"agent,omitempty"` -} - -func (a *Agent) getPersistedTokens() (*persistedTokens, error) { - persistedTokens := &persistedTokens{} - if !a.config.ACLEnableTokenPersistence { - return persistedTokens, nil - } - - a.persistedTokensLock.RLock() - defer a.persistedTokensLock.RUnlock() - - tokensFullPath := filepath.Join(a.config.DataDir, tokensPath) - - buf, err := ioutil.ReadFile(tokensFullPath) - if err != nil { - if os.IsNotExist(err) { - // non-existence is not an error we care about - return persistedTokens, nil - } - return persistedTokens, fmt.Errorf("failed reading tokens file %q: %s", tokensFullPath, err) - } - - if err := json.Unmarshal(buf, persistedTokens); err != nil { - return persistedTokens, fmt.Errorf("failed to decode tokens file %q: %s", tokensFullPath, err) - } - - return persistedTokens, nil -} - -func (a *Agent) loadTokens(conf *config.RuntimeConfig) error { - persistedTokens, persistenceErr := a.getPersistedTokens() - - if persistenceErr != nil { - a.logger.Warn("unable to load persisted tokens", "error", persistenceErr) - } - - if persistedTokens.Default != "" { - a.tokens.UpdateUserToken(persistedTokens.Default, token.TokenSourceAPI) - - if conf.ACLToken != "" { - a.logger.Warn("\"default\" token present in both the configuration and persisted token store, using the persisted token") - } - } else { - a.tokens.UpdateUserToken(conf.ACLToken, token.TokenSourceConfig) - } - - if persistedTokens.Agent != "" { - a.tokens.UpdateAgentToken(persistedTokens.Agent, token.TokenSourceAPI) - - if conf.ACLAgentToken != "" { - a.logger.Warn("\"agent\" token present in both the configuration and persisted token store, 
using the persisted token") - } - } else { - a.tokens.UpdateAgentToken(conf.ACLAgentToken, token.TokenSourceConfig) - } - - if persistedTokens.AgentMaster != "" { - a.tokens.UpdateAgentMasterToken(persistedTokens.AgentMaster, token.TokenSourceAPI) - - if conf.ACLAgentMasterToken != "" { - a.logger.Warn("\"agent_master\" token present in both the configuration and persisted token store, using the persisted token") - } - } else { - a.tokens.UpdateAgentMasterToken(conf.ACLAgentMasterToken, token.TokenSourceConfig) - } - - if persistedTokens.Replication != "" { - a.tokens.UpdateReplicationToken(persistedTokens.Replication, token.TokenSourceAPI) - - if conf.ACLReplicationToken != "" { - a.logger.Warn("\"replication\" token present in both the configuration and persisted token store, using the persisted token") - } - } else { - a.tokens.UpdateReplicationToken(conf.ACLReplicationToken, token.TokenSourceConfig) - } - - return persistenceErr -} - // snapshotCheckState is used to snapshot the current state of the health // checks. This is done before we reload our checks, so that we can properly // restore into the same state. @@ -3650,8 +3556,7 @@ func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error { // Reload tokens - should be done before all the other loading // to ensure the correct tokens are available for attaching to // the checks and service registrations. 
- a.loadTokens(newCfg) - a.loadEnterpriseTokens(newCfg) + a.tokens.Load(newCfg.ACLTokens, a.logger) if err := a.tlsConfigurator.Update(newCfg.ToTLSUtilConfig()); err != nil { return fmt.Errorf("Failed reloading tls configuration: %s", err) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 32f26b02e..1457d5093 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1,10 +1,8 @@ package agent import ( - "encoding/json" "fmt" "net/http" - "path/filepath" "strconv" "strings" @@ -21,7 +19,6 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/file" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/logging/monitor" "github.com/hashicorp/consul/types" @@ -1233,79 +1230,42 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in return nil, nil } - if s.agent.config.ACLEnableTokenPersistence { - // we hold the lock around updating the internal token store - // as well as persisting the tokens because we don't want to write - // into the store to have something else wipe it out before we can - // persist everything (like an agent config reload). The token store - // lock is only held for those operations so other go routines that - // just need to read some token out of the store will not be impacted - // any more than they would be without token persistence. - s.agent.persistedTokensLock.Lock() - defer s.agent.persistedTokensLock.Unlock() - } - // Figure out the target token. 
target := strings.TrimPrefix(req.URL.Path, "/v1/agent/token/") - triggerAntiEntropySync := false - switch target { - case "acl_token", "default": - changed := s.agent.tokens.UpdateUserToken(args.Token, token_store.TokenSourceAPI) - if changed { - triggerAntiEntropySync = true + + err = s.agent.tokens.WithPersistenceLock(func() error { + triggerAntiEntropySync := false + switch target { + case "acl_token", "default": + changed := s.agent.tokens.UpdateUserToken(args.Token, token_store.TokenSourceAPI) + if changed { + triggerAntiEntropySync = true + } + + case "acl_agent_token", "agent": + changed := s.agent.tokens.UpdateAgentToken(args.Token, token_store.TokenSourceAPI) + if changed { + triggerAntiEntropySync = true + } + + case "acl_agent_master_token", "agent_master": + s.agent.tokens.UpdateAgentMasterToken(args.Token, token_store.TokenSourceAPI) + + case "acl_replication_token", "replication": + s.agent.tokens.UpdateReplicationToken(args.Token, token_store.TokenSourceAPI) + + default: + return NotFoundError{Reason: fmt.Sprintf("Token %q is unknown", target)} } - case "acl_agent_token", "agent": - changed := s.agent.tokens.UpdateAgentToken(args.Token, token_store.TokenSourceAPI) - if changed { - triggerAntiEntropySync = true - } - - case "acl_agent_master_token", "agent_master": - s.agent.tokens.UpdateAgentMasterToken(args.Token, token_store.TokenSourceAPI) - - case "acl_replication_token", "replication": - s.agent.tokens.UpdateReplicationToken(args.Token, token_store.TokenSourceAPI) - - default: - resp.WriteHeader(http.StatusNotFound) - fmt.Fprintf(resp, "Token %q is unknown", target) - return nil, nil - } - - if triggerAntiEntropySync { - s.agent.sync.SyncFull.Trigger() - } - - if s.agent.config.ACLEnableTokenPersistence { - tokens := persistedTokens{} - - if tok, source := s.agent.tokens.UserTokenAndSource(); tok != "" && source == token_store.TokenSourceAPI { - tokens.Default = tok - } - - if tok, source := s.agent.tokens.AgentTokenAndSource(); tok != "" && 
source == token_store.TokenSourceAPI { - tokens.Agent = tok - } - - if tok, source := s.agent.tokens.AgentMasterTokenAndSource(); tok != "" && source == token_store.TokenSourceAPI { - tokens.AgentMaster = tok - } - - if tok, source := s.agent.tokens.ReplicationTokenAndSource(); tok != "" && source == token_store.TokenSourceAPI { - tokens.Replication = tok - } - - data, err := json.Marshal(tokens) - if err != nil { - s.agent.logger.Warn("failed to persist tokens", "error", err) - return nil, fmt.Errorf("Failed to marshal tokens for persistence: %v", err) - } - - if err := file.WriteAtomicWithPerms(filepath.Join(s.agent.config.DataDir, tokensPath), data, 0700, 0600); err != nil { - s.agent.logger.Warn("failed to persist tokens", "error", err) - return nil, fmt.Errorf("Failed to persist tokens - %v", err) + // TODO: is it safe to move this out of WithPersistenceLock? + if triggerAntiEntropySync { + s.agent.sync.SyncFull.Trigger() } + return nil + }) + if err != nil { + return nil, err } s.agent.logger.Info("Updated agent's ACL token", "token", target) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 505835ce8..5958c8b8c 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -4774,13 +4774,14 @@ func TestAgent_Token(t *testing.T) { init tokens raw tokens effective tokens + expectedErr error }{ { - name: "bad token name", - method: "PUT", - url: "nope?token=root", - body: body("X"), - code: http.StatusNotFound, + name: "bad token name", + method: "PUT", + url: "nope?token=root", + body: body("X"), + expectedErr: NotFoundError{Reason: `Token "nope" is unknown`}, }, { name: "bad JSON", @@ -4942,7 +4943,12 @@ func TestAgent_Token(t *testing.T) { url := fmt.Sprintf("/v1/agent/token/%s", tt.url) resp := httptest.NewRecorder() req, _ := http.NewRequest(tt.method, url, tt.body) + _, err := a.srv.AgentToken(resp, req) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + return + } require.NoError(t, err) 
require.Equal(t, tt.code, resp.Code) require.Equal(t, tt.effective.user, a.tokens.UserToken()) diff --git a/agent/agent_oss.go b/agent/agent_oss.go index 03b2f7ef5..705205fb3 100644 --- a/agent/agent_oss.go +++ b/agent/agent_oss.go @@ -23,10 +23,6 @@ func (a *Agent) initEnterprise(consulCfg *consul.Config) error { return nil } -// loadEnterpriseTokens is a noop stub for the func defined agent_ent.go -func (a *Agent) loadEnterpriseTokens(conf *config.RuntimeConfig) { -} - // reloadEnterprise is a noop stub for the func defined agent_ent.go func (a *Agent) reloadEnterprise(conf *config.RuntimeConfig) error { return nil diff --git a/agent/agent_test.go b/agent/agent_test.go index 479421f59..f3616f7c1 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -3345,163 +3345,6 @@ func TestAgent_reloadWatchesHTTPS(t *testing.T) { } } -func TestAgent_loadTokens(t *testing.T) { - t.Parallel() - a := NewTestAgent(t, ` - acl = { - enabled = true - tokens = { - agent = "alfa" - agent_master = "bravo", - default = "charlie" - replication = "delta" - } - } - - `) - defer a.Shutdown() - require := require.New(t) - - tokensFullPath := filepath.Join(a.config.DataDir, tokensPath) - - t.Run("original-configuration", func(t *testing.T) { - require.Equal("alfa", a.tokens.AgentToken()) - require.Equal("bravo", a.tokens.AgentMasterToken()) - require.Equal("charlie", a.tokens.UserToken()) - require.Equal("delta", a.tokens.ReplicationToken()) - }) - - t.Run("updated-configuration", func(t *testing.T) { - cfg := &config.RuntimeConfig{ - ACLToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", - } - // ensures no error for missing persisted tokens file - require.NoError(a.loadTokens(cfg)) - require.Equal("echo", a.tokens.UserToken()) - require.Equal("foxtrot", a.tokens.AgentToken()) - require.Equal("golf", a.tokens.AgentMasterToken()) - require.Equal("hotel", a.tokens.ReplicationToken()) - }) - - t.Run("persisted-tokens", func(t 
*testing.T) { - cfg := &config.RuntimeConfig{ - ACLToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", - } - - tokens := `{ - "agent" : "india", - "agent_master" : "juliett", - "default": "kilo", - "replication" : "lima" - }` - - require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600)) - require.NoError(a.loadTokens(cfg)) - - // no updates since token persistence is not enabled - require.Equal("echo", a.tokens.UserToken()) - require.Equal("foxtrot", a.tokens.AgentToken()) - require.Equal("golf", a.tokens.AgentMasterToken()) - require.Equal("hotel", a.tokens.ReplicationToken()) - - a.config.ACLEnableTokenPersistence = true - require.NoError(a.loadTokens(cfg)) - - require.Equal("india", a.tokens.AgentToken()) - require.Equal("juliett", a.tokens.AgentMasterToken()) - require.Equal("kilo", a.tokens.UserToken()) - require.Equal("lima", a.tokens.ReplicationToken()) - }) - - t.Run("persisted-tokens-override", func(t *testing.T) { - tokens := `{ - "agent" : "mike", - "agent_master" : "november", - "default": "oscar", - "replication" : "papa" - }` - - cfg := &config.RuntimeConfig{ - ACLToken: "quebec", - ACLAgentToken: "romeo", - ACLAgentMasterToken: "sierra", - ACLReplicationToken: "tango", - } - - require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600)) - require.NoError(a.loadTokens(cfg)) - - require.Equal("mike", a.tokens.AgentToken()) - require.Equal("november", a.tokens.AgentMasterToken()) - require.Equal("oscar", a.tokens.UserToken()) - require.Equal("papa", a.tokens.ReplicationToken()) - }) - - t.Run("partial-persisted", func(t *testing.T) { - tokens := `{ - "agent" : "uniform", - "agent_master" : "victor" - }` - - cfg := &config.RuntimeConfig{ - ACLToken: "whiskey", - ACLAgentToken: "xray", - ACLAgentMasterToken: "yankee", - ACLReplicationToken: "zulu", - } - - require.NoError(ioutil.WriteFile(tokensFullPath, []byte(tokens), 0600)) - require.NoError(a.loadTokens(cfg)) - - 
require.Equal("uniform", a.tokens.AgentToken()) - require.Equal("victor", a.tokens.AgentMasterToken()) - require.Equal("whiskey", a.tokens.UserToken()) - require.Equal("zulu", a.tokens.ReplicationToken()) - }) - - t.Run("persistence-error-not-json", func(t *testing.T) { - cfg := &config.RuntimeConfig{ - ACLToken: "one", - ACLAgentToken: "two", - ACLAgentMasterToken: "three", - ACLReplicationToken: "four", - } - - require.NoError(ioutil.WriteFile(tokensFullPath, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) - err := a.loadTokens(cfg) - require.Error(err) - - require.Equal("one", a.tokens.UserToken()) - require.Equal("two", a.tokens.AgentToken()) - require.Equal("three", a.tokens.AgentMasterToken()) - require.Equal("four", a.tokens.ReplicationToken()) - }) - - t.Run("persistence-error-wrong-top-level", func(t *testing.T) { - cfg := &config.RuntimeConfig{ - ACLToken: "alfa", - ACLAgentToken: "bravo", - ACLAgentMasterToken: "charlie", - ACLReplicationToken: "foxtrot", - } - - require.NoError(ioutil.WriteFile(tokensFullPath, []byte("[1,2,3]"), 0600)) - err := a.loadTokens(cfg) - require.Error(err) - - require.Equal("alfa", a.tokens.UserToken()) - require.Equal("bravo", a.tokens.AgentToken()) - require.Equal("charlie", a.tokens.AgentMasterToken()) - require.Equal("foxtrot", a.tokens.ReplicationToken()) - }) -} - func TestAgent_SecurityChecks(t *testing.T) { t.Parallel() hcl := ` diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 08ddfdb85..dd536bb8c 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" @@ -63,6 +64,10 @@ type RuntimeConfig struct { // hcl: acl.enabled = boolean ACLsEnabled bool + // TODO: remove old fields + // TODO: set DataDir as well + ACLTokens 
token.Config + // ACLAgentMasterToken is a special token that has full read and write // privileges for this agent, and can be used to call agent endpoints // when no servers are available. diff --git a/agent/setup.go b/agent/setup.go index b807e7f7e..d5a2d063e 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -79,6 +79,7 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error) d.RuntimeConfig = cfg d.Tokens = new(token.Store) + // cache-types are not registered yet, but they won't be used until the components are started. d.Cache = cache.New(cfg.Cache) d.ConnPool = newConnPool(cfg, d.Logger, d.TLSConfigurator) diff --git a/agent/token/persistence.go b/agent/token/persistence.go new file mode 100644 index 000000000..06861a40a --- /dev/null +++ b/agent/token/persistence.go @@ -0,0 +1,190 @@ +package token + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/hashicorp/consul/lib/file" +) + +// Logger used by Store.Load to report warnings. +type Logger interface { + Warn(msg string, args ...interface{}) +} + +// Config used by Store.Load, which includes tokens and settings for persistence. +type Config struct { + EnablePersistence bool + DataDir string + ACLDefaultToken string + ACLAgentToken string + ACLAgentMasterToken string + ACLReplicationToken string +} + +const tokensPath = "acl-tokens.json" + +// Load tokens from Config and optionally from a persisted file in the cfg.DataDir. +// If a token exists in both the persisted file and in the Config a warning will +// be logged and the persisted token will be used. +// +// Failures to load the persisted file will result in loading tokens from the +// config before returning the error. 
+func (t *Store) Load(cfg Config, logger Logger) error { + t.persistenceLock.RLock() + if !cfg.EnablePersistence { + t.persistence = nil + t.persistenceLock.RUnlock() + loadTokens(t, cfg, persistedTokens{}, logger) + return nil + } + + defer t.persistenceLock.RUnlock() + t.persistence = &fileStore{ + filename: filepath.Join(cfg.DataDir, tokensPath), + logger: logger, + } + return t.persistence.load(t, cfg) +} + +// WithPersistenceLock executes f while hold a lock. If f returns a nil error, +// the tokens in Store will be persisted to the tokens file. Otherwise no +// tokens will be persisted, and the error from f will be returned. +// +// The lock is held so that the writes are persisted before some other thread +// can change the value. +func (t *Store) WithPersistenceLock(f func() error) error { + t.persistenceLock.Lock() + if t.persistence == nil { + t.persistenceLock.Unlock() + return f() + } + defer t.persistenceLock.Unlock() + return t.persistence.withPersistenceLock(t, f) +} + +type persistedTokens struct { + Replication string `json:"replication,omitempty"` + AgentMaster string `json:"agent_master,omitempty"` + Default string `json:"default,omitempty"` + Agent string `json:"agent,omitempty"` +} + +type fileStore struct { + filename string + logger Logger +} + +func (p *fileStore) load(s *Store, cfg Config) error { + tokens, err := readPersistedFromFile(p.filename) + if err != nil { + p.logger.Warn("unable to load persisted tokens", "error", err) + } + loadTokens(s, cfg, tokens, p.logger) + return err +} + +func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { + if tokens.Default != "" { + s.UpdateUserToken(tokens.Default, TokenSourceAPI) + + if cfg.ACLDefaultToken != "" { + logger.Warn("\"default\" token present in both the configuration and persisted token store, using the persisted token") + } + } else { + s.UpdateUserToken(cfg.ACLDefaultToken, TokenSourceConfig) + } + + if tokens.Agent != "" { + s.UpdateAgentToken(tokens.Agent, 
TokenSourceAPI) + + if cfg.ACLAgentToken != "" { + logger.Warn("\"agent\" token present in both the configuration and persisted token store, using the persisted token") + } + } else { + s.UpdateAgentToken(cfg.ACLAgentToken, TokenSourceConfig) + } + + if tokens.AgentMaster != "" { + s.UpdateAgentMasterToken(tokens.AgentMaster, TokenSourceAPI) + + if cfg.ACLAgentMasterToken != "" { + logger.Warn("\"agent_master\" token present in both the configuration and persisted token store, using the persisted token") + } + } else { + s.UpdateAgentMasterToken(cfg.ACLAgentMasterToken, TokenSourceConfig) + } + + if tokens.Replication != "" { + s.UpdateReplicationToken(tokens.Replication, TokenSourceAPI) + + if cfg.ACLReplicationToken != "" { + logger.Warn("\"replication\" token present in both the configuration and persisted token store, using the persisted token") + } + } else { + s.UpdateReplicationToken(cfg.ACLReplicationToken, TokenSourceConfig) + } + + loadEnterpriseTokens(s, cfg) +} + +func readPersistedFromFile(filename string) (persistedTokens, error) { + tokens := persistedTokens{} + + buf, err := ioutil.ReadFile(filename) + switch { + case os.IsNotExist(err): + // non-existence is not an error we care about + return tokens, nil + case err != nil: + return tokens, fmt.Errorf("failed reading tokens file %q: %w", filename, err) + } + + if err := json.Unmarshal(buf, &tokens); err != nil { + return tokens, fmt.Errorf("failed to decode tokens file %q: %w", filename, err) + } + + return tokens, nil +} + +func (p *fileStore) withPersistenceLock(s *Store, f func() error) error { + if err := f(); err != nil { + return err + } + + return p.saveToFile(s) +} + +func (p *fileStore) saveToFile(s *Store) error { + tokens := persistedTokens{} + if tok, source := s.UserTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.Default = tok + } + + if tok, source := s.AgentTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.Agent = tok + } + + if tok, source := 
s.AgentMasterTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.AgentMaster = tok + } + + if tok, source := s.ReplicationTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.Replication = tok + } + + data, err := json.Marshal(tokens) + if err != nil { + p.logger.Warn("failed to persist tokens", "error", err) + return fmt.Errorf("Failed to marshal tokens for persistence: %v", err) + } + + if err := file.WriteAtomicWithPerms(p.filename, data, 0700, 0600); err != nil { + p.logger.Warn("failed to persist tokens", "error", err) + return fmt.Errorf("Failed to persist tokens - %v", err) + } + return nil +} diff --git a/agent/token/persistence_test.go b/agent/token/persistence_test.go new file mode 100644 index 000000000..ec8e7e60e --- /dev/null +++ b/agent/token/persistence_test.go @@ -0,0 +1,213 @@ +package token + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestStore_Load(t *testing.T) { + dataDir := testutil.TempDir(t, "datadir") + tokenFile := filepath.Join(dataDir, tokensPath) + logger := hclog.New(nil) + store := new(Store) + + t.Run("with empty store", func(t *testing.T) { + cfg := Config{ + DataDir: dataDir, + ACLAgentToken: "alfa", + ACLAgentMasterToken: "bravo", + ACLDefaultToken: "charlie", + ACLReplicationToken: "delta", + } + require.NoError(t, store.Load(cfg, logger)) + require.Equal(t, "alfa", store.AgentToken()) + require.Equal(t, "bravo", store.AgentMasterToken()) + require.Equal(t, "charlie", store.UserToken()) + require.Equal(t, "delta", store.ReplicationToken()) + }) + + t.Run("updated from Config", func(t *testing.T) { + cfg := Config{ + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentMasterToken: "golf", + ACLReplicationToken: "hotel", + } + // ensures no error for missing persisted tokens file + require.NoError(t, store.Load(cfg, logger)) + 
require.Equal(t, "echo", store.UserToken()) + require.Equal(t, "foxtrot", store.AgentToken()) + require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "hotel", store.ReplicationToken()) + }) + + t.Run("with persisted tokens", func(t *testing.T) { + cfg := Config{ + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentMasterToken: "golf", + ACLReplicationToken: "hotel", + } + + tokens := `{ + "agent" : "india", + "agent_master" : "juliett", + "default": "kilo", + "replication" : "lima" + }` + + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, store.Load(cfg, logger)) + + // no updates since token persistence is not enabled + require.Equal(t, "echo", store.UserToken()) + require.Equal(t, "foxtrot", store.AgentToken()) + require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "hotel", store.ReplicationToken()) + + cfg.EnablePersistence = true + require.NoError(t, store.Load(cfg, logger)) + + require.Equal(t, "india", store.AgentToken()) + require.Equal(t, "juliett", store.AgentMasterToken()) + require.Equal(t, "kilo", store.UserToken()) + require.Equal(t, "lima", store.ReplicationToken()) + + // check store persistence was enabled + require.NotNil(t, store.persistence) + }) + + t.Run("with persisted tokens, persisted tokens override config", func(t *testing.T) { + tokens := `{ + "agent" : "mike", + "agent_master" : "november", + "default": "oscar", + "replication" : "papa" + }` + + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "quebec", + ACLAgentToken: "romeo", + ACLAgentMasterToken: "sierra", + ACLReplicationToken: "tango", + } + + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, store.Load(cfg, logger)) + + require.Equal(t, "mike", store.AgentToken()) + require.Equal(t, "november", store.AgentMasterToken()) + require.Equal(t, "oscar", store.UserToken()) + require.Equal(t, "papa", 
store.ReplicationToken()) + }) + + t.Run("with some persisted tokens", func(t *testing.T) { + tokens := `{ + "agent" : "uniform", + "agent_master" : "victor" + }` + + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "whiskey", + ACLAgentToken: "xray", + ACLAgentMasterToken: "yankee", + ACLReplicationToken: "zulu", + } + + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, store.Load(cfg, logger)) + + require.Equal(t, "uniform", store.AgentToken()) + require.Equal(t, "victor", store.AgentMasterToken()) + require.Equal(t, "whiskey", store.UserToken()) + require.Equal(t, "zulu", store.ReplicationToken()) + }) + + t.Run("persisted file contains invalid data", func(t *testing.T) { + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "one", + ACLAgentToken: "two", + ACLAgentMasterToken: "three", + ACLReplicationToken: "four", + } + + require.NoError(t, ioutil.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) + err := store.Load(cfg, logger) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to decode tokens file") + + require.Equal(t, "one", store.UserToken()) + require.Equal(t, "two", store.AgentToken()) + require.Equal(t, "three", store.AgentMasterToken()) + require.Equal(t, "four", store.ReplicationToken()) + }) + + t.Run("persisted file contains invalid json", func(t *testing.T) { + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "alfa", + ACLAgentToken: "bravo", + ACLAgentMasterToken: "charlie", + ACLReplicationToken: "foxtrot", + } + + require.NoError(t, ioutil.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) + err := store.Load(cfg, logger) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to decode tokens file") + + require.Equal(t, "alfa", store.UserToken()) + require.Equal(t, "bravo", store.AgentToken()) + require.Equal(t, "charlie", store.AgentMasterToken()) + 
require.Equal(t, "foxtrot", store.ReplicationToken()) + }) +} + +func TestStore_WithPersistenceLock(t *testing.T) { + dataDir := testutil.TempDir(t, "datadir") + store := new(Store) + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "default-token", + ACLAgentToken: "agent-token", + ACLAgentMasterToken: "master-token", + ACLReplicationToken: "replication-token", + } + err := store.Load(cfg, hclog.New(nil)) + require.NoError(t, err) + + f := func() error { + updated := store.UpdateUserToken("the-new-token", TokenSourceAPI) + require.True(t, updated) + + updated = store.UpdateAgentMasterToken("the-new-master-token", TokenSourceAPI) + require.True(t, updated) + return nil + } + + err = store.WithPersistenceLock(f) + require.NoError(t, err) + + tokens, err := readPersistedFromFile(filepath.Join(dataDir, tokensPath)) + require.NoError(t, err) + expected := persistedTokens{ + Default: "the-new-token", + AgentMaster: "the-new-master-token", + } + require.Equal(t, expected, tokens) +} diff --git a/agent/token/store.go b/agent/token/store.go index 56ab7d806..456190f70 100644 --- a/agent/token/store.go +++ b/agent/token/store.go @@ -77,6 +77,12 @@ type Store struct { watchers map[int]watcher watcherIndex int + persistence *fileStore + // persistenceLock is used to synchronize access to the persisted token store + // within the data directory. This will prevent loading while writing as well as + // multiple concurrent writes. + persistenceLock sync.RWMutex + // enterpriseTokens contains tokens only used in consul-enterprise enterpriseTokens } @@ -158,7 +164,7 @@ func (t *Store) sendNotificationLocked(kinds ...TokenKind) { // Returns true if it was changed. 
func (t *Store) UpdateUserToken(token string, source TokenSource) bool { t.l.Lock() - changed := (t.userToken != token || t.userTokenSource != source) + changed := t.userToken != token || t.userTokenSource != source t.userToken = token t.userTokenSource = source if changed { @@ -172,7 +178,7 @@ func (t *Store) UpdateUserToken(token string, source TokenSource) bool { // Returns true if it was changed. func (t *Store) UpdateAgentToken(token string, source TokenSource) bool { t.l.Lock() - changed := (t.agentToken != token || t.agentTokenSource != source) + changed := t.agentToken != token || t.agentTokenSource != source t.agentToken = token t.agentTokenSource = source if changed { @@ -186,7 +192,7 @@ func (t *Store) UpdateAgentToken(token string, source TokenSource) bool { // Returns true if it was changed. func (t *Store) UpdateAgentMasterToken(token string, source TokenSource) bool { t.l.Lock() - changed := (t.agentMasterToken != token || t.agentMasterTokenSource != source) + changed := t.agentMasterToken != token || t.agentMasterTokenSource != source t.agentMasterToken = token t.agentMasterTokenSource = source if changed { @@ -200,7 +206,7 @@ func (t *Store) UpdateAgentMasterToken(token string, source TokenSource) bool { // Returns true if it was changed. 
func (t *Store) UpdateReplicationToken(token string, source TokenSource) bool { t.l.Lock() - changed := (t.replicationToken != token || t.replicationTokenSource != source) + changed := t.replicationToken != token || t.replicationTokenSource != source t.replicationToken = token t.replicationTokenSource = source if changed { diff --git a/agent/token/store_oss.go b/agent/token/store_oss.go index 0a182d826..97461edc2 100644 --- a/agent/token/store_oss.go +++ b/agent/token/store_oss.go @@ -7,6 +7,10 @@ type enterpriseTokens struct { } // enterpriseAgentToken OSS stub -func (s *Store) enterpriseAgentToken() string { +func (t *Store) enterpriseAgentToken() string { return "" } + +// loadEnterpriseTokens is a noop stub for the func defined in agent_ent.go +func loadEnterpriseTokens(_ *Store, _ Config) { +} From 8e477feb22931261ee0e8827b7bf488307af6f2a Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 25 Aug 2020 00:10:12 -0400 Subject: [PATCH 14/73] config: use token.Config for ACLToken config Using the target Config struct reduces the amount of copying and translating of configuration structs. 
--- agent/auto-config/auto_config.go | 2 +- agent/config/builder.go | 38 ++++++++++++++++++-------------- agent/config/runtime.go | 36 ------------------------------ agent/config/runtime_test.go | 37 ++++++++++++++++++++++--------- 4 files changed, 49 insertions(+), 64 deletions(-) diff --git a/agent/auto-config/auto_config.go b/agent/auto-config/auto_config.go index c2dd942c6..335f0f987 100644 --- a/agent/auto-config/auto_config.go +++ b/agent/auto-config/auto_config.go @@ -222,7 +222,7 @@ func (ac *AutoConfig) recordInitialConfiguration(resp *pbautoconf.AutoConfigResp } // ignoring the return value which would indicate a change in the token - _ = ac.acConfig.Tokens.UpdateAgentToken(config.ACLAgentToken, token.TokenSourceConfig) + _ = ac.acConfig.Tokens.UpdateAgentToken(config.ACLTokens.ACLAgentToken, token.TokenSourceConfig) // extra a structs.SignedResponse from the AutoConfigResponse for use in cache prepopulation signed, err := extractSignedResponse(resp) diff --git a/agent/config/builder.go b/agent/config/builder.go index 040b39aee..3dc12c463 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" "github.com/hashicorp/consul/agent/dns" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" libtempl "github.com/hashicorp/consul/lib/template" @@ -799,6 +800,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { // ---------------------------------------------------------------- // build runtime config // + dataDir := b.stringVal(c.DataDir) rt = RuntimeConfig{ // non-user configurable values ACLDisabledTTL: b.durationVal("acl.disabled_ttl", c.ACL.DisabledTTL), @@ -837,21 +839,25 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { GossipWANRetransmitMult: b.intVal(c.GossipWAN.RetransmitMult), // ACL - ACLsEnabled: aclsEnabled, - 
ACLAgentMasterToken: b.stringValWithDefault(c.ACL.Tokens.AgentMaster, b.stringVal(c.ACLAgentMasterToken)), - ACLAgentToken: b.stringValWithDefault(c.ACL.Tokens.Agent, b.stringVal(c.ACLAgentToken)), - ACLDatacenter: primaryDatacenter, - ACLDefaultPolicy: b.stringValWithDefault(c.ACL.DefaultPolicy, b.stringVal(c.ACLDefaultPolicy)), - ACLDownPolicy: b.stringValWithDefault(c.ACL.DownPolicy, b.stringVal(c.ACLDownPolicy)), - ACLEnableKeyListPolicy: b.boolValWithDefault(c.ACL.EnableKeyListPolicy, b.boolVal(c.ACLEnableKeyListPolicy)), - ACLMasterToken: b.stringValWithDefault(c.ACL.Tokens.Master, b.stringVal(c.ACLMasterToken)), - ACLReplicationToken: b.stringValWithDefault(c.ACL.Tokens.Replication, b.stringVal(c.ACLReplicationToken)), - ACLTokenTTL: b.durationValWithDefault("acl.token_ttl", c.ACL.TokenTTL, b.durationVal("acl_ttl", c.ACLTTL)), - ACLPolicyTTL: b.durationVal("acl.policy_ttl", c.ACL.PolicyTTL), - ACLRoleTTL: b.durationVal("acl.role_ttl", c.ACL.RoleTTL), - ACLToken: b.stringValWithDefault(c.ACL.Tokens.Default, b.stringVal(c.ACLToken)), - ACLTokenReplication: b.boolValWithDefault(c.ACL.TokenReplication, b.boolValWithDefault(c.EnableACLReplication, enableTokenReplication)), - ACLEnableTokenPersistence: b.boolValWithDefault(c.ACL.EnableTokenPersistence, false), + ACLsEnabled: aclsEnabled, + ACLDatacenter: primaryDatacenter, + ACLDefaultPolicy: b.stringValWithDefault(c.ACL.DefaultPolicy, b.stringVal(c.ACLDefaultPolicy)), + ACLDownPolicy: b.stringValWithDefault(c.ACL.DownPolicy, b.stringVal(c.ACLDownPolicy)), + ACLEnableKeyListPolicy: b.boolValWithDefault(c.ACL.EnableKeyListPolicy, b.boolVal(c.ACLEnableKeyListPolicy)), + ACLMasterToken: b.stringValWithDefault(c.ACL.Tokens.Master, b.stringVal(c.ACLMasterToken)), + ACLTokenTTL: b.durationValWithDefault("acl.token_ttl", c.ACL.TokenTTL, b.durationVal("acl_ttl", c.ACLTTL)), + ACLPolicyTTL: b.durationVal("acl.policy_ttl", c.ACL.PolicyTTL), + ACLRoleTTL: b.durationVal("acl.role_ttl", c.ACL.RoleTTL), + ACLTokenReplication: 
b.boolValWithDefault(c.ACL.TokenReplication, b.boolValWithDefault(c.EnableACLReplication, enableTokenReplication)), + + ACLTokens: token.Config{ + DataDir: dataDir, + EnablePersistence: b.boolValWithDefault(c.ACL.EnableTokenPersistence, false), + ACLDefaultToken: b.stringValWithDefault(c.ACL.Tokens.Default, b.stringVal(c.ACLToken)), + ACLAgentToken: b.stringValWithDefault(c.ACL.Tokens.Agent, b.stringVal(c.ACLAgentToken)), + ACLAgentMasterToken: b.stringValWithDefault(c.ACL.Tokens.AgentMaster, b.stringVal(c.ACLAgentMasterToken)), + ACLReplicationToken: b.stringValWithDefault(c.ACL.Tokens.Replication, b.stringVal(c.ACLReplicationToken)), + }, // Autopilot AutopilotCleanupDeadServers: b.boolVal(c.Autopilot.CleanupDeadServers), @@ -957,7 +963,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { ConnectTestCALeafRootChangeSpread: b.durationVal("connect.test_ca_leaf_root_change_spread", c.Connect.TestCALeafRootChangeSpread), ExposeMinPort: exposeMinPort, ExposeMaxPort: exposeMaxPort, - DataDir: b.stringVal(c.DataDir), + DataDir: dataDir, Datacenter: datacenter, DefaultQueryTime: b.durationVal("default_query_time", c.DefaultQueryTime), DevMode: b.boolVal(b.devMode), diff --git a/agent/config/runtime.go b/agent/config/runtime.go index dd536bb8c..757785422 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -64,24 +64,8 @@ type RuntimeConfig struct { // hcl: acl.enabled = boolean ACLsEnabled bool - // TODO: remove old fields - // TODO: set DataDir as well ACLTokens token.Config - // ACLAgentMasterToken is a special token that has full read and write - // privileges for this agent, and can be used to call agent endpoints - // when no servers are available. - // - // hcl: acl.tokens.agent_master = string - ACLAgentMasterToken string - - // ACLAgentToken is the default token used to make requests for the agent - // itself, such as for registering itself with the catalog. If not - // configured, the 'acl_token' will be used. 
- // - // hcl: acl.tokens.agent = string - ACLAgentToken string - // ACLDatacenter is the central datacenter that holds authoritative // ACL records. This must be the same for the entire cluster. // If this is not set, ACLs are not enabled. Off by default. @@ -128,16 +112,6 @@ type RuntimeConfig struct { // hcl: acl.tokens.master = string ACLMasterToken string - // ACLReplicationToken is used to replicate data locally from the - // PrimaryDatacenter. Replication is only available on servers in - // datacenters other than the PrimaryDatacenter - // - // DEPRECATED (ACL-Legacy-Compat): Setting this to a non-empty value - // also enables legacy ACL replication if ACLs are enabled and in legacy mode. - // - // hcl: acl.tokens.replication = string - ACLReplicationToken string - // ACLtokenReplication is used to indicate that both tokens and policies // should be replicated instead of just policies // @@ -162,16 +136,6 @@ type RuntimeConfig struct { // hcl: acl.role_ttl = "duration" ACLRoleTTL time.Duration - // ACLToken is the default token used to make requests if a per-request - // token is not provided. If not configured the 'anonymous' token is used. - // - // hcl: acl.tokens.default = string - ACLToken string - - // ACLEnableTokenPersistence determines whether or not tokens set via the agent HTTP API - // should be persisted to disk and reloaded when an agent restarts. - ACLEnableTokenPersistence bool - // AutopilotCleanupDeadServers enables the automatic cleanup of dead servers when new ones // are added to the peer list. Defaults to true. 
// diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index adbc269e6..48aafea6b 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/sdk/testutil" @@ -1613,7 +1614,7 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { json: []string{`{ "acl_replication_token": "a" }`}, hcl: []string{`acl_replication_token = "a"`}, patch: func(rt *RuntimeConfig) { - rt.ACLReplicationToken = "a" + rt.ACLTokens.ACLReplicationToken = "a" rt.ACLTokenReplication = true rt.DataDir = dataDir }, @@ -4350,6 +4351,13 @@ func testConfig(t *testing.T, tests []configTest, dataDir string) { if tt.patch != nil { tt.patch(&expected) } + + // both DataDir fields should always be the same, so test for the + // invariant, and than updated the expected, so that every test + // case does not need to set this field. 
+ require.Equal(t, actual.DataDir, actual.ACLTokens.DataDir) + expected.ACLTokens.DataDir = actual.ACLTokens.DataDir + require.Equal(t, expected, actual) }) } @@ -5843,20 +5851,24 @@ func TestFullConfig(t *testing.T) { // user configurable values - ACLAgentMasterToken: "64fd0e08", - ACLAgentToken: "bed2377c", + ACLTokens: token.Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "418fdff1", + ACLAgentToken: "bed2377c", + ACLAgentMasterToken: "64fd0e08", + ACLReplicationToken: "5795983a", + }, + ACLsEnabled: true, ACLDatacenter: "ejtmd43d", ACLDefaultPolicy: "72c2e7a0", ACLDownPolicy: "03eb2aee", ACLEnableKeyListPolicy: true, - ACLEnableTokenPersistence: true, ACLMasterToken: "8a19ac27", - ACLReplicationToken: "5795983a", ACLTokenTTL: 3321 * time.Second, ACLPolicyTTL: 1123 * time.Second, ACLRoleTTL: 9876 * time.Second, - ACLToken: "418fdff1", ACLTokenReplication: true, AdvertiseAddrLAN: ipAddr("17.99.29.16"), AdvertiseAddrWAN: ipAddr("78.63.37.19"), @@ -6804,21 +6816,24 @@ func TestSanitize(t *testing.T) { } rtJSON := `{ - "ACLAgentMasterToken": "hidden", - "ACLAgentToken": "hidden", + "ACLTokens": { + "ACLAgentMasterToken": "hidden", + "ACLAgentToken": "hidden", + "ACLDefaultToken": "hidden", + "ACLReplicationToken": "hidden", + "DataDir": "", + "EnablePersistence": false + }, "ACLDatacenter": "", "ACLDefaultPolicy": "", "ACLDisabledTTL": "0s", "ACLDownPolicy": "", "ACLEnableKeyListPolicy": false, - "ACLEnableTokenPersistence": false, "ACLMasterToken": "hidden", "ACLPolicyTTL": "0s", - "ACLReplicationToken": "hidden", "ACLRoleTTL": "0s", "ACLTokenReplication": false, "ACLTokenTTL": "0s", - "ACLToken": "hidden", "ACLsEnabled": false, "AEInterval": "0s", "AdvertiseAddrLAN": "", From 9535a1b57da1fbf3228850824b25a65bc01420f6 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 27 Aug 2020 13:15:10 -0400 Subject: [PATCH 15/73] token: OSS support for enterprise tokens --- agent/config/builder.go | 9 ++++----- agent/config/builder_oss.go | 8 
++++++-- agent/config/runtime_oss_test.go | 6 ++---- agent/config/runtime_test.go | 4 +++- agent/token/persistence.go | 2 ++ agent/token/store_oss.go | 3 +++ 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/agent/config/builder.go b/agent/config/builder.go index 3dc12c463..4b9aab1b7 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1078,10 +1078,8 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_rate must be strictly positive, was: %v", rt.Cache.EntryFetchRate) } - if entCfg, err := b.BuildEnterpriseRuntimeConfig(&c); err != nil { - return RuntimeConfig{}, err - } else { - rt.EnterpriseRuntimeConfig = entCfg + if err := b.BuildEnterpriseRuntimeConfig(&rt, &c); err != nil { + return rt, err } if rt.BootstrapExpect == 1 { @@ -1369,7 +1367,8 @@ func (b *Builder) Validate(rt RuntimeConfig) error { b.warn(err.Error()) } - return nil + err := b.validateEnterpriseConfig(rt) + return err } // addrUnique checks if the given address is already in use for another diff --git a/agent/config/builder_oss.go b/agent/config/builder_oss.go index b585cab50..85cf08137 100644 --- a/agent/config/builder_oss.go +++ b/agent/config/builder_oss.go @@ -51,8 +51,12 @@ func (e enterpriseConfigKeyError) Error() string { return fmt.Sprintf("%q is a Consul Enterprise configuration and will have no effect", e.key) } -func (_ *Builder) BuildEnterpriseRuntimeConfig(_ *Config) (EnterpriseRuntimeConfig, error) { - return EnterpriseRuntimeConfig{}, nil +func (*Builder) BuildEnterpriseRuntimeConfig(_ *RuntimeConfig, _ *Config) error { + return nil +} + +func (*Builder) validateEnterpriseConfig(_ RuntimeConfig) error { + return nil } // validateEnterpriseConfig is a function to validate the enterprise specific diff --git a/agent/config/runtime_oss_test.go b/agent/config/runtime_oss_test.go index 3871940c5..b6eee07e2 100644 --- a/agent/config/runtime_oss_test.go +++ b/agent/config/runtime_oss_test.go @@ 
-6,11 +6,9 @@ var entMetaJSON = `{}` var entRuntimeConfigSanitize = `{}` -var entFullDNSJSONConfig = `` +var entTokenConfigSanitize = `"EnterpriseConfig": {},` -var entFullDNSHCLConfig = `` - -var entFullRuntimeConfig = EnterpriseRuntimeConfig{} +func entFullRuntimeConfig(rt *RuntimeConfig) {} var enterpriseNonVotingServerWarnings []string = []string{enterpriseConfigKeyError{key: "non_voting_server"}.Error()} diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 48aafea6b..d56cce977 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -6497,9 +6497,10 @@ func TestFullConfig(t *testing.T) { "args": []interface{}{"dltjDJ2a", "flEa7C2d"}, }, }, - EnterpriseRuntimeConfig: entFullRuntimeConfig, } + entFullRuntimeConfig(&want) + warns := []string{ `The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`, `bootstrap_expect > 0: expecting 53 servers`, @@ -6817,6 +6818,7 @@ func TestSanitize(t *testing.T) { rtJSON := `{ "ACLTokens": { + ` + entTokenConfigSanitize + ` "ACLAgentMasterToken": "hidden", "ACLAgentToken": "hidden", "ACLDefaultToken": "hidden", diff --git a/agent/token/persistence.go b/agent/token/persistence.go index 06861a40a..c36b90364 100644 --- a/agent/token/persistence.go +++ b/agent/token/persistence.go @@ -23,6 +23,8 @@ type Config struct { ACLAgentToken string ACLAgentMasterToken string ACLReplicationToken string + + EnterpriseConfig } const tokensPath = "acl-tokens.json" diff --git a/agent/token/store_oss.go b/agent/token/store_oss.go index 97461edc2..16123052e 100644 --- a/agent/token/store_oss.go +++ b/agent/token/store_oss.go @@ -2,6 +2,9 @@ package token +type EnterpriseConfig struct { +} + // Stub for enterpriseTokens type enterpriseTokens struct { } From 8ae3332165955d720d64bbb9ae2fa4f3daf24427 Mon Sep 17 00:00:00 2001 From: Jasmine W Date: Tue, 1 Sep 2020 10:14:13 -0500 Subject: [PATCH 16/73] docs: update structure (#8506) - moved and renamed files/folders based on 
new structure - updated docs navigation based on new structure - moved CLI to top nav (created commands.jsx and commands-navigation.js) - updated and added redirects - updating to be consistent with standalone categories - changing "overview" link in top nav to lead to where intro was moved (docs/intro) - adding redirects for intro content - deleting old intro folders - format all data/navigation files - deleting old commands folder - reverting changes to glossary page - adjust intro navigation for removal of 'vs' paths - add helm page redirect - fix more redirects - add a missing redirect - fix broken anchor links and formatting mistakes - deleted duplicate section, added redirect, changed link - removed duplicate glossary page --- website/_redirects | 173 +++++++++++- website/components/learn-callout/style.css | 4 - website/data/commands-navigation.js | 78 ++++++ website/data/docs-navigation.js | 257 ++++++++---------- website/data/intro-navigation.js | 20 +- website/data/subnav.js | 7 +- website/layouts/commands.jsx | 41 +++ website/package.json | 4 +- website/pages/api-docs/acl/auth-methods.mdx | 4 +- website/pages/api-docs/acl/index.mdx | 2 +- .../commands/acl/auth-method/create.mdx | 2 +- .../commands/acl/auth-method/delete.mdx | 2 +- .../commands/acl/auth-method/index.mdx | 2 +- .../commands/acl/auth-method/list.mdx | 2 +- .../commands/acl/auth-method/read.mdx | 2 +- .../commands/acl/auth-method/update.mdx | 2 +- .../commands/acl/binding-rule/create.mdx | 2 +- .../commands/acl/binding-rule/delete.mdx | 2 +- .../commands/acl/binding-rule/index.mdx | 2 +- .../commands/acl/binding-rule/list.mdx | 2 +- .../commands/acl/binding-rule/read.mdx | 2 +- .../commands/acl/binding-rule/update.mdx | 2 +- .../{docs => }/commands/acl/bootstrap.mdx | 4 +- .../pages/{docs => }/commands/acl/index.mdx | 2 +- .../{docs => }/commands/acl/policy/create.mdx | 2 +- .../{docs => }/commands/acl/policy/delete.mdx | 2 +- .../{docs => }/commands/acl/policy/index.mdx | 2 +- .../{docs => 
}/commands/acl/policy/list.mdx | 2 +- .../{docs => }/commands/acl/policy/read.mdx | 2 +- .../{docs => }/commands/acl/policy/update.mdx | 2 +- .../{docs => }/commands/acl/role/create.mdx | 2 +- .../{docs => }/commands/acl/role/delete.mdx | 2 +- .../{docs => }/commands/acl/role/index.mdx | 2 +- .../{docs => }/commands/acl/role/list.mdx | 2 +- .../{docs => }/commands/acl/role/read.mdx | 2 +- .../{docs => }/commands/acl/role/update.mdx | 2 +- .../commands/acl/set-agent-token.mdx | 2 +- .../{docs => }/commands/acl/token/clone.mdx | 2 +- .../{docs => }/commands/acl/token/create.mdx | 2 +- .../{docs => }/commands/acl/token/delete.mdx | 2 +- .../{docs => }/commands/acl/token/index.mdx | 2 +- .../{docs => }/commands/acl/token/list.mdx | 2 +- .../{docs => }/commands/acl/token/read.mdx | 2 +- .../{docs => }/commands/acl/token/update.mdx | 6 +- .../commands/acl/translate-rules.mdx | 2 +- website/pages/{docs => }/commands/agent.mdx | 2 +- .../commands/catalog/datacenters.mdx | 2 +- .../{docs => }/commands/catalog/index.mdx | 2 +- .../{docs => }/commands/catalog/nodes.mdx | 2 +- .../{docs => }/commands/catalog/services.mdx | 2 +- .../{docs => }/commands/config/delete.mdx | 2 +- .../{docs => }/commands/config/index.mdx | 2 +- .../pages/{docs => }/commands/config/list.mdx | 2 +- .../pages/{docs => }/commands/config/read.mdx | 2 +- .../{docs => }/commands/config/write.mdx | 2 +- .../pages/{docs => }/commands/connect/ca.mdx | 2 +- .../{docs => }/commands/connect/envoy.mdx | 2 +- .../{docs => }/commands/connect/expose.mdx | 2 +- .../{docs => }/commands/connect/index.mdx | 2 +- .../{docs => }/commands/connect/proxy.mdx | 2 +- website/pages/{docs => }/commands/debug.mdx | 2 +- website/pages/{docs => }/commands/event.mdx | 2 +- website/pages/{docs => }/commands/exec.mdx | 2 +- .../pages/{docs => }/commands/force-leave.mdx | 4 +- website/pages/{docs => }/commands/index.mdx | 2 +- website/pages/{docs => }/commands/info.mdx | 2 +- .../{docs => }/commands/intention/check.mdx | 2 +- 
.../{docs => }/commands/intention/create.mdx | 2 +- .../{docs => }/commands/intention/delete.mdx | 2 +- .../{docs => }/commands/intention/get.mdx | 2 +- .../{docs => }/commands/intention/index.mdx | 2 +- .../{docs => }/commands/intention/match.mdx | 2 +- website/pages/{docs => }/commands/join.mdx | 2 +- website/pages/{docs => }/commands/keygen.mdx | 2 +- website/pages/{docs => }/commands/keyring.mdx | 2 +- .../pages/{docs => }/commands/kv/delete.mdx | 2 +- .../pages/{docs => }/commands/kv/export.mdx | 2 +- website/pages/{docs => }/commands/kv/get.mdx | 2 +- .../pages/{docs => }/commands/kv/import.mdx | 2 +- .../pages/{docs => }/commands/kv/index.mdx | 2 +- website/pages/{docs => }/commands/kv/put.mdx | 2 +- website/pages/{docs => }/commands/leave.mdx | 2 +- website/pages/{docs => }/commands/license.mdx | 6 +- website/pages/{docs => }/commands/lock.mdx | 6 +- website/pages/{docs => }/commands/login.mdx | 2 +- website/pages/{docs => }/commands/logout.mdx | 2 +- website/pages/{docs => }/commands/maint.mdx | 2 +- website/pages/{docs => }/commands/members.mdx | 2 +- website/pages/{docs => }/commands/monitor.mdx | 2 +- .../{docs => }/commands/namespace/create.mdx | 2 +- .../{docs => }/commands/namespace/delete.mdx | 2 +- .../{docs => }/commands/namespace/index.mdx | 2 +- .../{docs => }/commands/namespace/list.mdx | 2 +- .../{docs => }/commands/namespace/read.mdx | 2 +- .../{docs => }/commands/namespace/update.mdx | 2 +- .../{docs => }/commands/namespace/write.mdx | 2 +- .../{docs => }/commands/operator/area.mdx | 4 +- .../commands/operator/autopilot.mdx | 6 +- .../{docs => }/commands/operator/index.mdx | 10 +- .../{docs => }/commands/operator/raft.mdx | 2 +- website/pages/{docs => }/commands/reload.mdx | 2 +- website/pages/{docs => }/commands/rtt.mdx | 2 +- .../commands/services/deregister.mdx | 2 +- .../{docs => }/commands/services/index.mdx | 2 +- .../{docs => }/commands/services/register.mdx | 2 +- .../{docs => }/commands/snapshot/agent.mdx | 2 +- .../{docs => 
}/commands/snapshot/index.mdx | 2 +- .../{docs => }/commands/snapshot/inspect.mdx | 2 +- .../{docs => }/commands/snapshot/restore.mdx | 2 +- .../{docs => }/commands/snapshot/save.mdx | 2 +- website/pages/{docs => }/commands/tls/ca.mdx | 2 +- .../pages/{docs => }/commands/tls/cert.mdx | 2 +- .../pages/{docs => }/commands/tls/index.mdx | 2 +- .../pages/{docs => }/commands/validate.mdx | 2 +- website/pages/{docs => }/commands/version.mdx | 2 +- website/pages/{docs => }/commands/watch.mdx | 2 +- website/pages/docs/agent/options.mdx | 64 ++--- .../anti-entropy.mdx | 0 .../{internals => architecture}/consensus.mdx | 2 +- .../coordinates.mdx | 0 .../{internals => architecture}/gossip.mdx | 4 +- .../index.mdx} | 0 .../{internals => architecture}/jepsen.mdx | 0 .../pages/docs/connect/connect-internals.mdx | 4 +- .../docs/connect/gateways/ingress-gateway.mdx | 4 +- .../index.mdx} | 4 +- .../wan-federation-via-mesh-gateways.mdx | 6 +- .../connect/gateways/terminating-gateway.mdx | 6 +- website/pages/docs/connect/index.mdx | 4 +- website/pages/docs/connect/intentions.mdx | 4 +- .../l7-traffic}/discovery-chain.mdx | 0 .../index.mdx} | 0 .../pages/docs/connect/proxies/built-in.mdx | 2 +- .../connect/proxies/managed-deprecated.mdx | 8 +- website/pages/docs/connect/security.mdx | 2 +- .../docs/{agent => discovery}/checks.mdx | 4 +- .../pages/docs/{agent => discovery}/dns.mdx | 4 +- .../docs/{agent => discovery}/services.mdx | 4 +- .../index.mdx => docs/download-tools.mdx} | 7 +- .../docs/{agent => dynamic-app-config}/kv.mdx | 0 .../sessions.mdx | 0 .../{agent => dynamic-app-config}/watches.mdx | 0 website/pages/docs/enterprise/index.mdx | 4 +- website/pages/docs/guides/acl-legacy.mdx | 8 +- website/pages/docs/guides/consul-f5.mdx | 2 +- .../pages/docs/guides/consul-splitting.mdx | 10 +- .../docs/guides/kuberenetes-deployment.mdx | 20 +- .../guides/kubernetes-production-deploy.mdx | 16 +- .../docs/guides/managing-acl-policies.mdx | 6 +- website/pages/docs/guides/servers.mdx | 4 +- 
.../{agent => install}/cloud-auto-join.mdx | 5 +- website/pages/docs/{ => install}/glossary.mdx | 2 +- website/pages/docs/install/index.mdx | 4 +- website/pages/docs/install/performance.mdx | 14 +- website/pages/docs/internals/acl.mdx | 4 +- website/pages/{ => docs}/intro/index.mdx | 8 +- .../pages/{ => docs}/intro/vs/chef-puppet.mdx | 2 +- website/pages/{ => docs}/intro/vs/custom.mdx | 2 +- website/pages/{ => docs}/intro/vs/eureka.mdx | 2 +- website/pages/{ => docs}/intro/vs/index.mdx | 2 +- website/pages/{ => docs}/intro/vs/istio.mdx | 2 +- .../{ => docs}/intro/vs/nagios-sensu.mdx | 2 +- website/pages/{ => docs}/intro/vs/proxies.mdx | 2 +- website/pages/{ => docs}/intro/vs/serf.mdx | 2 +- website/pages/{ => docs}/intro/vs/skydns.mdx | 2 +- .../pages/{ => docs}/intro/vs/smartstack.mdx | 2 +- .../pages/{ => docs}/intro/vs/zookeeper.mdx | 2 +- .../docs/k8s/connect/ingress-gateways.mdx | 2 +- .../servers-outside-kubernetes.mdx | 6 +- .../docs/k8s/{ => installation}/helm.mdx | 12 +- website/pages/docs/k8s/installation/index.mdx | 2 +- .../installation/multi-cluster/kubernetes.mdx | 6 +- .../multi-cluster/vms-and-kubernetes.mdx | 6 +- .../tls-on-existing-cluster.mdx | 6 +- .../uninstalling.mdx => uninstall.mdx} | 8 +- .../{operations/upgrading.mdx => upgrade.mdx} | 8 +- .../docs/{ => security}/acl/acl-legacy.mdx | 0 .../{ => security}/acl/acl-migrate-tokens.mdx | 0 .../docs/{ => security}/acl/acl-rules.mdx | 0 .../docs/{ => security}/acl/acl-system.mdx | 4 +- .../{ => security}/acl/auth-methods/index.mdx | 0 .../{ => security}/acl/auth-methods/jwt.mdx | 0 .../acl/auth-methods/kubernetes.mdx | 0 .../{ => security}/acl/auth-methods/oidc.mdx | 0 .../pages/docs/{ => security}/acl/index.mdx | 0 .../docs/{agent => security}/encryption.mdx | 0 .../security.mdx => security/index.mdx} | 4 +- .../docs/{ => troubleshoot}/common-errors.mdx | 0 website/pages/docs/{ => troubleshoot}/faq.mdx | 0 website/pages/docs/upgrading/index.mdx | 4 +- website/pages/downloads/index.jsx | 2 +- 
website/pages/intro/getting-started/agent.mdx | 2 +- .../use-cases/multi-platform-service-mesh.jsx | 7 +- .../network-infrastructure-automation.jsx | 2 +- .../service-discovery-and-health-checking.jsx | 2 +- 195 files changed, 689 insertions(+), 488 deletions(-) create mode 100644 website/data/commands-navigation.js create mode 100644 website/layouts/commands.jsx rename website/pages/{docs => }/commands/acl/auth-method/create.mdx (99%) rename website/pages/{docs => }/commands/acl/auth-method/delete.mdx (97%) rename website/pages/{docs => }/commands/acl/auth-method/index.mdx (99%) rename website/pages/{docs => }/commands/acl/auth-method/list.mdx (98%) rename website/pages/{docs => }/commands/acl/auth-method/read.mdx (98%) rename website/pages/{docs => }/commands/acl/auth-method/update.mdx (99%) rename website/pages/{docs => }/commands/acl/binding-rule/create.mdx (99%) rename website/pages/{docs => }/commands/acl/binding-rule/delete.mdx (97%) rename website/pages/{docs => }/commands/acl/binding-rule/index.mdx (99%) rename website/pages/{docs => }/commands/acl/binding-rule/list.mdx (99%) rename website/pages/{docs => }/commands/acl/binding-rule/read.mdx (98%) rename website/pages/{docs => }/commands/acl/binding-rule/update.mdx (99%) rename website/pages/{docs => }/commands/acl/bootstrap.mdx (86%) rename website/pages/{docs => }/commands/acl/index.mdx (99%) rename website/pages/{docs => }/commands/acl/policy/create.mdx (99%) rename website/pages/{docs => }/commands/acl/policy/delete.mdx (98%) rename website/pages/{docs => }/commands/acl/policy/index.mdx (99%) rename website/pages/{docs => }/commands/acl/policy/list.mdx (99%) rename website/pages/{docs => }/commands/acl/policy/read.mdx (99%) rename website/pages/{docs => }/commands/acl/policy/update.mdx (99%) rename website/pages/{docs => }/commands/acl/role/create.mdx (99%) rename website/pages/{docs => }/commands/acl/role/delete.mdx (98%) rename website/pages/{docs => }/commands/acl/role/index.mdx (99%) rename 
website/pages/{docs => }/commands/acl/role/list.mdx (99%) rename website/pages/{docs => }/commands/acl/role/read.mdx (98%) rename website/pages/{docs => }/commands/acl/role/update.mdx (99%) rename website/pages/{docs => }/commands/acl/set-agent-token.mdx (98%) rename website/pages/{docs => }/commands/acl/token/clone.mdx (98%) rename website/pages/{docs => }/commands/acl/token/create.mdx (99%) rename website/pages/{docs => }/commands/acl/token/delete.mdx (97%) rename website/pages/{docs => }/commands/acl/token/index.mdx (99%) rename website/pages/{docs => }/commands/acl/token/list.mdx (99%) rename website/pages/{docs => }/commands/acl/token/read.mdx (99%) rename website/pages/{docs => }/commands/acl/token/update.mdx (96%) rename website/pages/{docs => }/commands/acl/translate-rules.mdx (99%) rename website/pages/{docs => }/commands/agent.mdx (97%) rename website/pages/{docs => }/commands/catalog/datacenters.mdx (96%) rename website/pages/{docs => }/commands/catalog/index.mdx (99%) rename website/pages/{docs => }/commands/catalog/nodes.mdx (99%) rename website/pages/{docs => }/commands/catalog/services.mdx (98%) rename website/pages/{docs => }/commands/config/delete.mdx (97%) rename website/pages/{docs => }/commands/config/index.mdx (98%) rename website/pages/{docs => }/commands/config/list.mdx (97%) rename website/pages/{docs => }/commands/config/read.mdx (98%) rename website/pages/{docs => }/commands/config/write.mdx (99%) rename website/pages/{docs => }/commands/connect/ca.mdx (99%) rename website/pages/{docs => }/commands/connect/envoy.mdx (99%) rename website/pages/{docs => }/commands/connect/expose.mdx (99%) rename website/pages/{docs => }/commands/connect/index.mdx (98%) rename website/pages/{docs => }/commands/connect/proxy.mdx (99%) rename website/pages/{docs => }/commands/debug.mdx (99%) rename website/pages/{docs => }/commands/event.mdx (99%) rename website/pages/{docs => }/commands/exec.mdx (99%) rename website/pages/{docs => }/commands/force-leave.mdx 
(95%) rename website/pages/{docs => }/commands/index.mdx (99%) rename website/pages/{docs => }/commands/info.mdx (99%) rename website/pages/{docs => }/commands/intention/check.mdx (98%) rename website/pages/{docs => }/commands/intention/create.mdx (98%) rename website/pages/{docs => }/commands/intention/delete.mdx (97%) rename website/pages/{docs => }/commands/intention/get.mdx (97%) rename website/pages/{docs => }/commands/intention/index.mdx (99%) rename website/pages/{docs => }/commands/intention/match.mdx (98%) rename website/pages/{docs => }/commands/join.mdx (98%) rename website/pages/{docs => }/commands/keygen.mdx (97%) rename website/pages/{docs => }/commands/keyring.mdx (99%) rename website/pages/{docs => }/commands/kv/delete.mdx (99%) rename website/pages/{docs => }/commands/kv/export.mdx (97%) rename website/pages/{docs => }/commands/kv/get.mdx (99%) rename website/pages/{docs => }/commands/kv/import.mdx (98%) rename website/pages/{docs => }/commands/kv/index.mdx (99%) rename website/pages/{docs => }/commands/kv/put.mdx (99%) rename website/pages/{docs => }/commands/leave.mdx (98%) rename website/pages/{docs => }/commands/license.mdx (95%) rename website/pages/{docs => }/commands/lock.mdx (97%) rename website/pages/{docs => }/commands/login.mdx (99%) rename website/pages/{docs => }/commands/logout.mdx (97%) rename website/pages/{docs => }/commands/maint.mdx (99%) rename website/pages/{docs => }/commands/members.mdx (98%) rename website/pages/{docs => }/commands/monitor.mdx (98%) rename website/pages/{docs => }/commands/namespace/create.mdx (99%) rename website/pages/{docs => }/commands/namespace/delete.mdx (97%) rename website/pages/{docs => }/commands/namespace/index.mdx (99%) rename website/pages/{docs => }/commands/namespace/list.mdx (99%) rename website/pages/{docs => }/commands/namespace/read.mdx (98%) rename website/pages/{docs => }/commands/namespace/update.mdx (99%) rename website/pages/{docs => }/commands/namespace/write.mdx (98%) rename 
website/pages/{docs => }/commands/operator/area.mdx (98%) rename website/pages/{docs => }/commands/operator/autopilot.mdx (94%) rename website/pages/{docs => }/commands/operator/index.mdx (80%) rename website/pages/{docs => }/commands/operator/raft.mdx (99%) rename website/pages/{docs => }/commands/reload.mdx (98%) rename website/pages/{docs => }/commands/rtt.mdx (99%) rename website/pages/{docs => }/commands/services/deregister.mdx (99%) rename website/pages/{docs => }/commands/services/index.mdx (98%) rename website/pages/{docs => }/commands/services/register.mdx (99%) rename website/pages/{docs => }/commands/snapshot/agent.mdx (99%) rename website/pages/{docs => }/commands/snapshot/index.mdx (99%) rename website/pages/{docs => }/commands/snapshot/inspect.mdx (98%) rename website/pages/{docs => }/commands/snapshot/restore.mdx (98%) rename website/pages/{docs => }/commands/snapshot/save.mdx (98%) rename website/pages/{docs => }/commands/tls/ca.mdx (98%) rename website/pages/{docs => }/commands/tls/cert.mdx (99%) rename website/pages/{docs => }/commands/tls/index.mdx (98%) rename website/pages/{docs => }/commands/validate.mdx (98%) rename website/pages/{docs => }/commands/version.mdx (98%) rename website/pages/{docs => }/commands/watch.mdx (99%) rename website/pages/docs/{internals => architecture}/anti-entropy.mdx (100%) rename website/pages/docs/{internals => architecture}/consensus.mdx (99%) rename website/pages/docs/{internals => architecture}/coordinates.mdx (100%) rename website/pages/docs/{internals => architecture}/gossip.mdx (98%) rename website/pages/docs/{internals/architecture.mdx => architecture/index.mdx} (100%) rename website/pages/docs/{internals => architecture}/jepsen.mdx (100%) rename website/pages/docs/connect/gateways/{mesh-gateway.mdx => mesh-gateway/index.mdx} (98%) rename website/pages/docs/connect/gateways/{ => mesh-gateway}/wan-federation-via-mesh-gateways.mdx (98%) rename website/pages/docs/{internals => 
connect/l7-traffic}/discovery-chain.mdx (100%) rename website/pages/docs/connect/{l7-traffic-management.mdx => l7-traffic/index.mdx} (100%) rename website/pages/docs/{agent => discovery}/checks.mdx (99%) rename website/pages/docs/{agent => discovery}/dns.mdx (99%) rename website/pages/docs/{agent => discovery}/services.mdx (99%) rename website/pages/{downloads_tools/index.mdx => docs/download-tools.mdx} (98%) rename website/pages/docs/{agent => dynamic-app-config}/kv.mdx (100%) rename website/pages/docs/{internals => dynamic-app-config}/sessions.mdx (100%) rename website/pages/docs/{agent => dynamic-app-config}/watches.mdx (100%) rename website/pages/docs/{agent => install}/cloud-auto-join.mdx (99%) rename website/pages/docs/{ => install}/glossary.mdx (99%) rename website/pages/{ => docs}/intro/index.mdx (97%) rename website/pages/{ => docs}/intro/vs/chef-puppet.mdx (99%) rename website/pages/{ => docs}/intro/vs/custom.mdx (99%) rename website/pages/{ => docs}/intro/vs/eureka.mdx (99%) rename website/pages/{ => docs}/intro/vs/index.mdx (98%) rename website/pages/{ => docs}/intro/vs/istio.mdx (99%) rename website/pages/{ => docs}/intro/vs/nagios-sensu.mdx (99%) rename website/pages/{ => docs}/intro/vs/proxies.mdx (99%) rename website/pages/{ => docs}/intro/vs/serf.mdx (99%) rename website/pages/{ => docs}/intro/vs/skydns.mdx (99%) rename website/pages/{ => docs}/intro/vs/smartstack.mdx (99%) rename website/pages/{ => docs}/intro/vs/zookeeper.mdx (99%) rename website/pages/docs/k8s/{ => installation}/helm.mdx (99%) rename website/pages/docs/k8s/{operations => }/tls-on-existing-cluster.mdx (96%) rename website/pages/docs/k8s/{operations/uninstalling.mdx => uninstall.mdx} (91%) rename website/pages/docs/k8s/{operations/upgrading.mdx => upgrade.mdx} (95%) rename website/pages/docs/{ => security}/acl/acl-legacy.mdx (100%) rename website/pages/docs/{ => security}/acl/acl-migrate-tokens.mdx (100%) rename website/pages/docs/{ => security}/acl/acl-rules.mdx (100%) rename 
website/pages/docs/{ => security}/acl/acl-system.mdx (99%) rename website/pages/docs/{ => security}/acl/auth-methods/index.mdx (100%) rename website/pages/docs/{ => security}/acl/auth-methods/jwt.mdx (100%) rename website/pages/docs/{ => security}/acl/auth-methods/kubernetes.mdx (100%) rename website/pages/docs/{ => security}/acl/auth-methods/oidc.mdx (100%) rename website/pages/docs/{ => security}/acl/index.mdx (100%) rename website/pages/docs/{agent => security}/encryption.mdx (100%) rename website/pages/docs/{internals/security.mdx => security/index.mdx} (99%) rename website/pages/docs/{ => troubleshoot}/common-errors.mdx (100%) rename website/pages/docs/{ => troubleshoot}/faq.mdx (100%) diff --git a/website/_redirects b/website/_redirects index c74519f83..3a63ba913 100644 --- a/website/_redirects +++ b/website/_redirects @@ -40,19 +40,172 @@ /docs/connect/terminating_gateway /docs/connect/gateways/terminating-gateway 301! /docs/connect/terminating_gateway.html /docs/connect/gateways/terminating-gateway 301! /docs/connect/terminating-gateway /docs/connect/gateways/terminating-gateway 301! +/docs/k8s/connect.html /docs/k8s/connect 301! +/docs/agent/cloud-auto-join /docs/install/cloud-auto-join 301! +/docs/internals/security /docs/security 301! +/docs/acl/ /docs/security/acl/ 301! +/docs/acl/acl-system /docs/security/acl/acl-system 301! +/docs/acl/acl-rules /docs/security/acl/acl-rules 301! +/docs/acl/acl-legacy /docs/security/acl/acl-legacy 301! +/docs/acl/acl-migrate-tokens /docs/security/acl/acl-migrate-tokens 301! +/docs/acl/auth-methods /docs/security/acl/auth-methods 301! +/docs/acl/auth-methods/kubernetes /docs/security/acl/auth-methods/kubernetes 301! +/docs/acl/auth-methods/jwt /docs/security/acl/auth-methods/jwt 301! +/docs/acl/auth-methods/oidc /docs/security/acl/auth-methods/oidc 301! +/docs/agent/kv /docs/dynamic-app-config/kv 301! +/docs/internals/sessions /docs/dynamic-app-config/sessions 301! 
+/docs/agent/watches /docs/dynamic-app-config/watches 301!
+/docs/connect/l7-traffic-management /docs/connect/l7-traffic/ 301!
+/docs/internals/discovery-chain /docs/connect/l7-traffic/discovery-chain 301!
+/docs/k8s/operations/upgrading /docs/k8s/upgrade 301!
+/docs/k8s/operations/uninstalling /docs/k8s/uninstall 301!
+/docs/k8s/operations/tls-on-existing-cluster /docs/k8s/tls-on-existing-cluster 301!
+/docs/k8s/helm /docs/k8s/installation/helm 301!
+/docs/agent/services /docs/discovery/services 301!
+/docs/agent/checks /docs/discovery/checks 301!
+/docs/agent/dns /docs/discovery/dns 301!
+/docs/agent/encryption /docs/security/encryption 301!
+/docs/internals/architecture /docs/architecture 301!
+/docs/internals/anti-entropy /docs/architecture/anti-entropy 301!
+/docs/internals/consensus /docs/architecture/consensus 301!
+/docs/internals/gossip /docs/architecture/gossip 301!
+/docs/internals/jepsen /docs/architecture/jepsen 301!
+/docs/internals/coordinates /docs/architecture/coordinates 301!
+/docs/glossary /docs/install/glossary 301!
+/docs/connect/gateways/mesh-gateways /docs/connect/gateways/mesh-gateway 301!
+/docs/connect/gateways/wan-federation-via-mesh-gateways /docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways 301!
+/docs/faq /docs/troubleshoot/faq 301!
+/docs/common-errors /docs/troubleshoot/common-errors 301!
+/intro /docs/intro 301!
+/intro/vs /docs/intro/vs 301!
+/intro/vs/zookeeper /docs/intro/vs/zookeeper 301!
+/intro/vs/chef-puppet /docs/intro/vs/chef-puppet 301!
+/intro/vs/nagios-sensu /docs/intro/vs/nagios-sensu 301!
+/intro/vs/skydns /docs/intro/vs/skydns 301!
+/intro/vs/smartstack /docs/intro/vs/smartstack 301!
+/intro/vs/serf /docs/intro/vs/serf 301!
+/intro/vs/eureka /docs/intro/vs/eureka 301!
+/intro/vs/istio /docs/intro/vs/istio 301!
+/intro/vs/proxies /docs/intro/vs/proxies 301!
+/intro/vs/custom /docs/intro/vs/custom 301!
+/download-tools /docs/download-tools 301!
+
+# CLI redirects
+/docs/commands /commands 301!
+/docs/commands/acl /commands/acl 301! +/docs/commands/acl/auth-method /commands/acl/auth-method 301! +/docs/commands/acl/auth-method/create /commands/acl/auth-method/create 301! +/docs/commands/acl/auth-method/delete /commands/acl/auth-method/delete 301! +/docs/commands/acl/auth-method/list /commands/acl/auth-method/list 301! +/docs/commands/acl/auth-method/read /commands/acl/auth-method/read 301! +/docs/commands/acl/auth-method/update /commands/acl/auth-method/update 301! +/docs/commands/acl/binding-rule /commands/acl/binding-rule 301! +/docs/commands/acl/binding-rule/create /commands/acl/binding-rule/create 301! +/docs/commands/acl/binding-rule/delete /commands/acl/binding-rule/delete 301! +/docs/commands/acl/binding-rule/list /commands/acl/binding-rule/list 301! +/docs/commands/acl/binding-rule/read /commands/acl/binding-rule/read 301! +/docs/commands/acl/binding-rule/update /commands/acl/binding-rule/update 301! +/docs/commands/acl/bootstrap /commands/acl/bootstrap 301! +/docs/commands/acl/policy/ /commands/acl/policy 301! +/docs/commands/acl/policy/create /commands/acl/policy/create 301! +/docs/commands/acl/policy/delete /commands/acl/policy/delete 301! +/docs/commands/acl/policy/list /commands/acl/policy/list 301! +/docs/commands/acl/policy/read /commands/acl/policy/read 301! +/docs/commands/acl/policy/update /commands/acl/policy/update 301! +/docs/commands/acl/set-agent-token /commands/acl/set-agent-token 301! +/docs/commands/acl/token /commands/acl/token 301! +/docs/commands/acl/token/clone /commands/acl/token/clone 301! +/docs/commands/acl/token/create /commands/acl/token/create 301! +/docs/commands/acl/token/delete /commands/acl/token/delete 301! +/docs/commands/acl/token/list /commands/acl/token/list 301! +/docs/commands/acl/token/read /commands/acl/token/read 301! +/docs/commands/acl/token/update /commands/acl/token/update 301! +/docs/commands/acl/translate-rules /commands/acl/translate-rules 301! +/docs/commands/agent /commands/agent 301! 
+/docs/commands/catalog /commands/catalog 301! +/docs/commands/catalog/datacenters /commands/catalog/datacenters 301! +/docs/commands/catalog/nodes /commands/catalog/nodes 301! +/docs/commands/catalog/services /commands/catalog/services 301! +/docs/commands/config /commands/config 301! +/docs/commands/config/delete /commands/config/delete 301! +/docs/commands/config/list /commands/config/list 301! +/docs/commands/config/read /commands/config/read 301! +/docs/commands/config/write /commands/config/write 301! +/docs/commands/connect /commands/connect 301! +/docs/commands/connect/ca /commands/connect/ca 301! +/docs/commands/connect/proxy /commands/connect/proxy 301! +/docs/commands/connect/envoy /commands/connect/envoy 301! +/docs/commands/connect/expose /commands/connect/expose 301! +/docs/commands/debug /commands/debug 301! +/docs/commands/event /commands/event 301! +/docs/commands/exec /commands/exec 301! +/docs/commands/force-leave /commands/force-leave 301! +/docs/commands/info /commands/info 301! +/docs/commands/intention /commands/intention 301! +/docs/commands/intention/check /commands/intention/check 301! +/docs/commands/intention/create /commands/intention/create 301! +/docs/commands/intention/delete /commands/intention/delete 301! +/docs/commands/intention/get /commands/intention/get 301! +/docs/commands/intention/match /commands/intention/match 301! +/docs/commands/join /commands/join 301! +/docs/commands/keygen /commands/keygen 301! +/docs/commands/keyring /commands/keyring 301! +/docs/commands/kv /commands/kv 301! +/docs/commands/kv/delete /commands/kv/delete 301! +/docs/commands/kv/export /commands/kv/export 301! +/docs/commands/kv/get /commands/kv/get 301! +/docs/commands/kv/import /commands/kv/import 301! +/docs/commands/kv/put /commands/kv/put 301! +/docs/commands/leave /commands/leave 301! +/docs/commands/license /commands/license 301! +/docs/commands/lock /commands/lock 301! +/docs/commands/login /commands/login 301! 
+/docs/commands/logout /commands/logout 301! +/docs/commands/maint /commands/maint 301! +/docs/commands/members /commands/members 301! +/docs/commands/monitor /commands/monitor 301! +/docs/commands/namespace /commands/namespace 301! +/docs/commands/namespace/create /commands/namespace/create 301! +/docs/commands/namespace/delete /commands/namespace/delete 301! +/docs/commands/namespace/list /commands/namespace/list 301! +/docs/commands/namespace/read /commands/namespace/read 301! +/docs/commands/namespace/update /commands/namespace/update 301! +/docs/commands/namespace/write /commands/namespace/write 301! +/docs/commands/operator /commands/operator 301! +/docs/commands/operator/area /commands/operator/area 301! +/docs/commands/operator/autopilot /commands/operator/autopilot 301! +/docs/commands/operator/raft /commands/operator/raft 301! +/docs/commands/reload /commands/reload 301! +/docs/commands/rft /commands/rft 301! +/docs/commands/rtt /commands/rtt 301! +/docs/commands/services /commands/services 301! +/docs/commands/services/register /commands/services/register 301! +/docs/commands/services/deregister /commands/services/deregister 301! +/docs/commands/snapshot /commands/snapshot 301! +/docs/commands/snapshot/agent /commands/snapshot/agent 301! +/docs/commands/snapshot/inspect /commands/snapshot/inspect 301! +/docs/commands/snapshot/restore /commands/snapshot/restore 301! +/docs/commands/snapshot/save /commands/snapshot/save 301! +/docs/commands/tls /commands/tls 301! +/docs/commands/tls/ca /commands/tls/ca 301! +/docs/commands/tls/cert /commands/tls/cert 301! +/docs/commands/validate /commands/validate 301! +/docs/commands/version /commands/version 301! +/docs/commands/watch /commands/watch 301! +/commands/index /commands 301! # CLI renames -/docs/commands/acl/acl-bootstrap.html /docs/commands/acl/bootstrap.html 301! -/docs/commands/acl/acl-bootstrap /docs/commands/acl/bootstrap.html 301! -/docs/commands/acl/acl-policy.html /docs/commands/acl/policy.html 301! 
-/docs/commands/acl/acl-policy /docs/commands/acl/policy.html 301! -/docs/commands/acl/acl-set-agent-token.html /docs/commands/acl/set-agent-token.html 301! -/docs/commands/acl/acl-set-agent-token /docs/commands/acl/set-agent-token.html 301! -/docs/commands/acl/acl-token.html /docs/commands/acl/token.html 301! -/docs/commands/acl/acl-token /docs/commands/acl/token.html 301! -/docs/commands/acl/acl-translate-rules.html /docs/commands/acl/translate-rules.html 301! -/docs/commands/acl/acl-translate-rules /docs/commands/acl/translate-rules.html 301! +/docs/commands/acl/acl-bootstrap.html /commands/acl/bootstrap 301! +/docs/commands/acl/acl-bootstrap /commands/acl/bootstrap 301! +/docs/commands/acl/acl-policy.html /commands/acl/policy 301! +/docs/commands/acl/acl-policy /commands/acl/policy 301! +/docs/commands/acl/acl-set-agent-token.html /commands/acl/set-agent-token 301! +/docs/commands/acl/acl-set-agent-token /commands/acl/set-agent-token 301! +/docs/commands/acl/acl-token.html /commands/acl/token 301! +/docs/commands/acl/acl-token /commands/acl/token 301! +/docs/commands/acl/acl-translate-rules.html /commands/acl/translate-rules 301! +/docs/commands/acl/acl-translate-rules /commands/acl/translate-rules 301! # Consul Learn Redirects /docs/guides/acl.html https://learn.hashicorp.com/consul/security-networking/production-acls 301! 
diff --git a/website/components/learn-callout/style.css b/website/components/learn-callout/style.css index 6c78a7515..0ef9ae5b8 100644 --- a/website/components/learn-callout/style.css +++ b/website/components/learn-callout/style.css @@ -1,10 +1,6 @@ .g-learn-callout { padding-top: 88px; padding-bottom: 88px; - background-image: url(/img/nomad-panel-learn.svg); - background-size: contain; - background-position: bottom right; - background-repeat: no-repeat; @media (max-width: 768px) { padding-top: 64px; diff --git a/website/data/commands-navigation.js b/website/data/commands-navigation.js new file mode 100644 index 000000000..57ed8427f --- /dev/null +++ b/website/data/commands-navigation.js @@ -0,0 +1,78 @@ +export default [ + 'index', + { + category: 'acl', + content: [ + { + category: 'auth-method', + content: ['create', 'delete', 'list', 'read', 'update'], + }, + { + category: 'binding-rule', + content: ['create', 'delete', 'list', 'read', 'update'], + }, + 'bootstrap', + { + category: 'policy', + content: ['create', 'delete', 'list', 'read', 'update'], + }, + { + category: 'role', + content: ['create', 'delete', 'list', 'read', 'update'], + }, + 'set-agent-token', + { + category: 'token', + content: ['clone', 'create', 'delete', 'list', 'read', 'update'], + }, + 'translate-rules', + ], + }, + 'agent', + { category: 'catalog', content: ['datacenters', 'nodes', 'services'] }, + { category: 'config', content: ['delete', 'list', 'read', 'write'] }, + { category: 'connect', content: ['ca', 'proxy', 'envoy', 'expose'] }, + 'debug', + 'event', + 'exec', + 'force-leave', + 'info', + { + category: 'intention', + content: ['check', 'create', 'delete', 'get', 'match'], + }, + 'join', + 'keygen', + 'keyring', + { + category: 'kv', + content: ['delete', 'export', 'get', 'import', 'put'], + }, + 'leave', + 'license', + 'lock', + 'login', + 'logout', + 'maint', + 'members', + 'monitor', + { + category: 'namespace', + content: ['create', 'delete', 'list', 'read', 'update', 
'write'], + }, + { + category: 'operator', + content: ['area', 'autopilot', 'raft'], + }, + 'reload', + 'rtt', + { category: 'services', content: ['register', 'deregister'] }, + { + category: 'snapshot', + content: ['agent', 'inspect', 'restore', 'save'], + }, + { category: 'tls', content: ['ca', 'cert'] }, + 'validate', + 'version', + 'watch', +] diff --git a/website/data/docs-navigation.js b/website/data/docs-navigation.js index 3038406c6..8fd74023b 100644 --- a/website/data/docs-navigation.js +++ b/website/data/docs-navigation.js @@ -7,178 +7,85 @@ // serve as the category title in the sidebar export default [ - { category: 'install', content: ['ports', 'bootstrapping', 'performance'] }, { - category: 'upgrading', - content: ['compatibility', 'upgrade-specific'], - }, - 'glossary', - { - category: 'internals', - content: [ - 'architecture', - 'consensus', - 'gossip', - 'coordinates', - 'sessions', - 'anti-entropy', - 'security', - 'jepsen', - 'discovery-chain', - ], - }, - { - category: 'commands', + category: 'intro', content: [ { - category: 'acl', + category: 'vs', content: [ - { - category: 'auth-method', - content: ['create', 'delete', 'list', 'read', 'update'], - }, - { - category: 'binding-rule', - content: ['create', 'delete', 'list', 'read', 'update'], - }, - 'bootstrap', - { - category: 'policy', - content: ['create', 'delete', 'list', 'read', 'update'], - }, - { - category: 'role', - content: ['create', 'delete', 'list', 'read', 'update'], - }, - 'set-agent-token', - { - category: 'token', - content: ['clone', 'create', 'delete', 'list', 'read', 'update'], - }, - 'translate-rules', + 'zookeeper', + 'chef-puppet', + 'nagios-sensu', + 'skydns', + 'smartstack', + 'serf', + 'eureka', + 'istio', + 'proxies', + 'custom', ], }, - 'agent', - { category: 'catalog', content: ['datacenters', 'nodes', 'services'] }, - { category: 'config', content: ['delete', 'list', 'read', 'write'] }, - { category: 'connect', content: ['ca', 'proxy', 'envoy', 'expose'] }, - 
'debug', - 'event', - 'exec', - 'force-leave', - 'info', - { - category: 'intention', - content: ['check', 'create', 'delete', 'get', 'match'], - }, - 'join', - 'keygen', - 'keyring', - { - category: 'kv', - content: ['delete', 'export', 'get', 'import', 'put'], - }, - 'leave', - 'license', - 'lock', - 'login', - 'logout', - 'maint', - 'members', - 'monitor', - { - category: 'namespace', - content: ['create', 'delete', 'list', 'read', 'update', 'write'], - }, - { - category: 'operator', - content: ['area', 'autopilot', 'raft'], - }, - 'reload', - 'rtt', - { category: 'services', content: ['register', 'deregister'] }, - { - category: 'snapshot', - content: ['agent', 'inspect', 'restore', 'save'], - }, - { category: 'tls', content: ['ca', 'cert'] }, - 'validate', - 'version', - 'watch', ], }, + { - category: 'agent', + category: 'install', content: [ - 'dns', - 'options', - { - category: 'config-entries', - content: [ - 'ingress-gateway', - 'proxy-defaults', - 'service-defaults', - 'service-resolver', - 'service-router', - 'service-splitter', - 'terminating-gateway', - ], - }, + { title: 'Consul Agent', href: '/docs/agent' }, + 'glossary', + 'ports', + 'bootstrapping', 'cloud-auto-join', - 'services', - 'checks', - 'kv', - 'sentinel', - 'encryption', - 'telemetry', - 'watches', + 'performance', + { title: 'Kubernetes', href: '/docs/k8s' }, ], }, { - category: 'acl', - content: [ - 'acl-system', - 'acl-rules', - 'acl-legacy', - 'acl-migrate-tokens', - { category: 'auth-methods', content: ['kubernetes', 'jwt', 'oidc'] }, - ], + category: 'discovery', + name: 'Service Discovery', + content: ['services', 'dns', 'checks'], }, + { category: 'connect', content: [ - 'configuration', - 'connectivity-tasks', 'connect-internals', - 'observability', - 'l7-traffic-management', - 'intentions', + 'configuration', { category: 'proxies', content: ['envoy', 'built-in', 'integrate'], }, + { + category: 'registration', + content: ['service-registration', 'sidecar-service'], + }, + 
'intentions', + 'observability', + { + category: 'l7-traffic', + content: ['discovery-chain'], + }, + 'connectivity-tasks', { category: 'gateways', content: [ - 'mesh-gateway', - 'wan-federation-via-mesh-gateways', + { + category: 'mesh-gateway', + content: ['wan-federation-via-mesh-gateways'], + }, + 'ingress-gateway', 'terminating-gateway', ], }, - { - category: 'registration', - content: ['service-registration', 'sidecar-service'], - }, - 'security', + 'nomad', + { title: 'Kubernetes', href: '/docs/k8s/connect' }, + { category: 'native', content: ['go'] }, { category: 'ca', content: ['consul', 'vault', 'aws'], }, - { category: 'native', content: ['go'] }, 'dev', - 'nomad', - { title: 'Kubernetes', href: '/docs/k8s/connect' }, ], }, { @@ -187,6 +94,7 @@ export default [ { category: 'installation', content: [ + 'helm', { category: 'platforms', name: 'Platform Guides', @@ -229,11 +137,7 @@ export default [ }, ], }, - { - category: 'operations', - name: 'Operations', - content: ['upgrading', 'tls-on-existing-cluster', 'uninstalling'], - }, + 'tls-on-existing-cluster', { category: 'connect', content: [ @@ -245,14 +149,50 @@ export default [ 'service-sync', 'dns', 'ambassador', - 'helm', + 'upgrade', + 'uninstall', + ], + }, + { + category: 'dynamic-app-config', + name: 'Dynamic App Configuration', + content: ['kv', 'sessions', 'watches'], + }, + { + category: 'agent', + content: [ + 'options', + { + category: 'config-entries', + content: [ + 'ingress-gateway', + 'proxy-defaults', + 'service-defaults', + 'service-resolver', + 'service-router', + 'service-splitter', + 'terminating-gateway', + ], + }, + 'telemetry', + ], + }, + { + category: 'security', + content: [ + { + category: 'acl', + content: [ + 'acl-system', + 'acl-rules', + 'acl-legacy', + 'acl-migrate-tokens', + { category: 'auth-methods', content: ['kubernetes', 'jwt', 'oidc'] }, + ], + }, + 'encryption', ], }, - '-------', - 'common-errors', - 'faq', - '--------', - 'partnerships', { category: 'enterprise', 
content: [ @@ -260,11 +200,30 @@ export default [ 'backups', 'upgrades', 'read-scale', + { + title: 'Single sign-on - OIDC', + href: '/docs/security/acl/auth-methods/oidc', + }, 'redundancy', 'federation', - 'network-segments', 'namespaces', + 'network-segments', 'sentinel', ], }, + { + category: 'architecture', + content: ['anti-entropy', 'consensus', 'gossip', 'jepsen', 'coordinates'], + }, + 'partnerships', + 'download-tools', + { + category: 'upgrading', + content: ['compatibility', 'upgrade-specific'], + }, + { + category: 'troubleshoot', + name: 'Troubleshoot', + content: ['common-errors', 'faq'], + }, ] diff --git a/website/data/intro-navigation.js b/website/data/intro-navigation.js index c97176b0a..761c488cf 100644 --- a/website/data/intro-navigation.js +++ b/website/data/intro-navigation.js @@ -6,22 +6,4 @@ // the landing page for the category, or a "name" property to // serve as the category title in the sidebar -export default [ - 'index', - { - category: 'vs', - content: [ - 'zookeeper', - 'chef-puppet', - 'nagios-sensu', - 'skydns', - 'smartstack', - 'serf', - 'eureka', - 'istio', - 'proxies', - 'custom', - ], - }, - 'getting-started', -] +export default ['getting-started'] diff --git a/website/data/subnav.js b/website/data/subnav.js index 51fda8dec..1c3b5de40 100644 --- a/website/data/subnav.js +++ b/website/data/subnav.js @@ -1,5 +1,5 @@ export default [ - { text: 'Overview', url: '/intro' }, + { text: 'Overview', url: '/docs/intro' }, { text: 'Use Cases', submenu: [ @@ -39,6 +39,11 @@ export default [ url: '/api-docs', type: 'inbound', }, + { + text: 'CLI', + url: '/commands', + type: 'inbound,' + }, { text: 'Community', url: '/community', diff --git a/website/layouts/commands.jsx b/website/layouts/commands.jsx new file mode 100644 index 000000000..eaf370a71 --- /dev/null +++ b/website/layouts/commands.jsx @@ -0,0 +1,41 @@ +import DocsPage from '@hashicorp/react-docs-page' +import order from '../data/commands-navigation.js' +import { frontMatter as 
data } from '../pages/commands/**/*.mdx' +import Head from 'next/head' +import Link from 'next/link' +import { createMdxProvider } from '@hashicorp/nextjs-scripts/lib/providers/docs' + +const MDXProvider = createMdxProvider({ product: 'consul' }) + +function CommandsLayoutWrapper(pageMeta) { + function CommandsLayout(props) { + return ( + + + + ) + } + + CommandsLayout.getInitialProps = ({ asPath }) => ({ path: asPath }) + + return CommandsLayout +} + +export default CommandsLayoutWrapper diff --git a/website/package.json b/website/package.json index 931c00d25..1079ffef5 100644 --- a/website/package.json +++ b/website/package.json @@ -56,9 +56,9 @@ }, "main": "index.js", "scripts": { - "build": "node --max-old-space-size=2048 ./node_modules/.bin/next build", + "build": "node --max-old-space-size=4096 ./node_modules/.bin/next build", "dynamic": "NODE_ENV=production next build && next start", - "export": "node --max-old-space-size=2048 ./node_modules/.bin/next export", + "export": "node --max-old-space-size=4096 ./node_modules/.bin/next export", "format": "next-hashicorp format", "generate:component": "next-hashicorp generate component", "generate:readme": "next-hashicorp markdown-blocks README.md", diff --git a/website/pages/api-docs/acl/auth-methods.mdx b/website/pages/api-docs/acl/auth-methods.mdx index effc2de19..ef4dcad93 100644 --- a/website/pages/api-docs/acl/auth-methods.mdx +++ b/website/pages/api-docs/acl/auth-methods.mdx @@ -53,7 +53,7 @@ The table below shows this endpoint's support for - `MaxTokenTTL` `(duration: 0s)` - This specifies the maximum life of any token created by this auth method. When set it will initialize the - [`ExpirationTime`](/api/acl/tokens.html#expirationtime) field on all tokens + [`ExpirationTime`](/api/acl/tokens#expirationtime) field on all tokens to a value of `Token.CreateTime + AuthMethod.MaxTokenTTL`. This field is not persisted beyond its initial use. 
Can be specified in the form of `"60s"` or `"5m"` (i.e., 60 seconds or 5 minutes, respectively). This value must be no @@ -232,7 +232,7 @@ The table below shows this endpoint's support for - `MaxTokenTTL` `(duration: 0s)` - This specifies the maximum life of any token created by this auth method. When set it will initialize the - [`ExpirationTime`](/api/acl/tokens.html#expirationtime) field on all tokens + [`ExpirationTime`](/api/acl/tokens#expirationtime) field on all tokens to a value of `Token.CreateTime + AuthMethod.MaxTokenTTL`. This field is not persisted beyond its initial use. Can be specified in the form of `"60s"` or `"5m"` (i.e., 60 seconds or 5 minutes, respectively). This value must be no diff --git a/website/pages/api-docs/acl/index.mdx b/website/pages/api-docs/acl/index.mdx index 57010d52f..d4cc06e75 100644 --- a/website/pages/api-docs/acl/index.mdx +++ b/website/pages/api-docs/acl/index.mdx @@ -165,7 +165,7 @@ $ curl \ - `ReplicatedTokenIndex` - The last token index that was successfully replicated. This index can be compared with the value of the `X-Consul-Index` header returned - by the [`/v1/acl/tokens`](/api/acl/tokens#list-acls) endpoint to determine + by the [`/v1/acl/tokens`](/api/acl/tokens#list-tokens) endpoint to determine if the replication process has gotten all available ACL tokens. Note that ACL replication is rate limited so the indexes may lag behind the primary datacenter. 
diff --git a/website/pages/docs/commands/acl/auth-method/create.mdx b/website/pages/commands/acl/auth-method/create.mdx similarity index 99% rename from website/pages/docs/commands/acl/auth-method/create.mdx rename to website/pages/commands/acl/auth-method/create.mdx index 23aaa5244..e685c0684 100644 --- a/website/pages/docs/commands/acl/auth-method/create.mdx +++ b/website/pages/commands/acl/auth-method/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Method Create' sidebar_title: create --- diff --git a/website/pages/docs/commands/acl/auth-method/delete.mdx b/website/pages/commands/acl/auth-method/delete.mdx similarity index 97% rename from website/pages/docs/commands/acl/auth-method/delete.mdx rename to website/pages/commands/acl/auth-method/delete.mdx index a4de93e92..3a25521e8 100644 --- a/website/pages/docs/commands/acl/auth-method/delete.mdx +++ b/website/pages/commands/acl/auth-method/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Method Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/acl/auth-method/index.mdx b/website/pages/commands/acl/auth-method/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/auth-method/index.mdx rename to website/pages/commands/acl/auth-method/index.mdx index 05431814f..bd1b8aa32 100644 --- a/website/pages/docs/commands/acl/auth-method/index.mdx +++ b/website/pages/commands/acl/auth-method/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Methods' sidebar_title: auth-method --- diff --git a/website/pages/docs/commands/acl/auth-method/list.mdx b/website/pages/commands/acl/auth-method/list.mdx similarity index 98% rename from website/pages/docs/commands/acl/auth-method/list.mdx rename to website/pages/commands/acl/auth-method/list.mdx index fd15ce18f..933b9c187 100644 --- a/website/pages/docs/commands/acl/auth-method/list.mdx +++ 
b/website/pages/commands/acl/auth-method/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Method List' sidebar_title: list --- diff --git a/website/pages/docs/commands/acl/auth-method/read.mdx b/website/pages/commands/acl/auth-method/read.mdx similarity index 98% rename from website/pages/docs/commands/acl/auth-method/read.mdx rename to website/pages/commands/acl/auth-method/read.mdx index 14442da66..20ed1f526 100644 --- a/website/pages/docs/commands/acl/auth-method/read.mdx +++ b/website/pages/commands/acl/auth-method/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Method Read' sidebar_title: read --- diff --git a/website/pages/docs/commands/acl/auth-method/update.mdx b/website/pages/commands/acl/auth-method/update.mdx similarity index 99% rename from website/pages/docs/commands/acl/auth-method/update.mdx rename to website/pages/commands/acl/auth-method/update.mdx index d1017b160..c403c583e 100644 --- a/website/pages/docs/commands/acl/auth-method/update.mdx +++ b/website/pages/commands/acl/auth-method/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Auth Method Update' sidebar_title: update --- diff --git a/website/pages/docs/commands/acl/binding-rule/create.mdx b/website/pages/commands/acl/binding-rule/create.mdx similarity index 99% rename from website/pages/docs/commands/acl/binding-rule/create.mdx rename to website/pages/commands/acl/binding-rule/create.mdx index 6324d3042..d4bfef9aa 100644 --- a/website/pages/docs/commands/acl/binding-rule/create.mdx +++ b/website/pages/commands/acl/binding-rule/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule Create' sidebar_title: create --- diff --git a/website/pages/docs/commands/acl/binding-rule/delete.mdx b/website/pages/commands/acl/binding-rule/delete.mdx similarity index 97% rename from 
website/pages/docs/commands/acl/binding-rule/delete.mdx rename to website/pages/commands/acl/binding-rule/delete.mdx index e95ef986b..01d9efb32 100644 --- a/website/pages/docs/commands/acl/binding-rule/delete.mdx +++ b/website/pages/commands/acl/binding-rule/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/acl/binding-rule/index.mdx b/website/pages/commands/acl/binding-rule/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/binding-rule/index.mdx rename to website/pages/commands/acl/binding-rule/index.mdx index bd1261480..0c24bb819 100644 --- a/website/pages/docs/commands/acl/binding-rule/index.mdx +++ b/website/pages/commands/acl/binding-rule/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule' sidebar_title: binding-rule --- diff --git a/website/pages/docs/commands/acl/binding-rule/list.mdx b/website/pages/commands/acl/binding-rule/list.mdx similarity index 99% rename from website/pages/docs/commands/acl/binding-rule/list.mdx rename to website/pages/commands/acl/binding-rule/list.mdx index 97fd98188..418e4a685 100644 --- a/website/pages/docs/commands/acl/binding-rule/list.mdx +++ b/website/pages/commands/acl/binding-rule/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule List' sidebar_title: list --- diff --git a/website/pages/docs/commands/acl/binding-rule/read.mdx b/website/pages/commands/acl/binding-rule/read.mdx similarity index 98% rename from website/pages/docs/commands/acl/binding-rule/read.mdx rename to website/pages/commands/acl/binding-rule/read.mdx index ec1c06bbc..dbafa02ed 100644 --- a/website/pages/docs/commands/acl/binding-rule/read.mdx +++ b/website/pages/commands/acl/binding-rule/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule Read' 
sidebar_title: read --- diff --git a/website/pages/docs/commands/acl/binding-rule/update.mdx b/website/pages/commands/acl/binding-rule/update.mdx similarity index 99% rename from website/pages/docs/commands/acl/binding-rule/update.mdx rename to website/pages/commands/acl/binding-rule/update.mdx index 9b63735c3..78346fbc2 100644 --- a/website/pages/docs/commands/acl/binding-rule/update.mdx +++ b/website/pages/commands/acl/binding-rule/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Binding Rule Update' sidebar_title: update --- diff --git a/website/pages/docs/commands/acl/bootstrap.mdx b/website/pages/commands/acl/bootstrap.mdx similarity index 86% rename from website/pages/docs/commands/acl/bootstrap.mdx rename to website/pages/commands/acl/bootstrap.mdx index eea0d244b..3e34bc798 100644 --- a/website/pages/docs/commands/acl/bootstrap.mdx +++ b/website/pages/commands/acl/bootstrap.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Bootstrap' sidebar_title: bootstrap --- @@ -11,7 +11,7 @@ Command: `consul acl bootstrap` The `acl bootstrap` command will request Consul to generate a new token with unlimited privileges to use for management purposes and output its details. This can only be done once and afterwards bootstrapping will be disabled. If all tokens are lost and you need to bootstrap again you can follow the bootstrap -[reset procedure](https://learn.hashicorp.com/tutorials/consul/access-control-troubleshoot?utm_source=consul.io&utm_medium=docs#reset-the-acl-system). +[reset procedure](https://learn.hashicorp.com/consul/security-networking/acl-troubleshooting?utm_source=consul.io&utm_medium=docs#reset-the-acl-system). The ACL system can also be bootstrapped via the [HTTP API](/api/acl/acl#bootstrap-acls). 
diff --git a/website/pages/docs/commands/acl/index.mdx b/website/pages/commands/acl/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/index.mdx rename to website/pages/commands/acl/index.mdx index c9d319bf1..901c2a2fd 100644 --- a/website/pages/docs/commands/acl/index.mdx +++ b/website/pages/commands/acl/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL' sidebar_title: acl --- diff --git a/website/pages/docs/commands/acl/policy/create.mdx b/website/pages/commands/acl/policy/create.mdx similarity index 99% rename from website/pages/docs/commands/acl/policy/create.mdx rename to website/pages/commands/acl/policy/create.mdx index 58a3309c7..2a2649242 100644 --- a/website/pages/docs/commands/acl/policy/create.mdx +++ b/website/pages/commands/acl/policy/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy Create' sidebar_title: create --- diff --git a/website/pages/docs/commands/acl/policy/delete.mdx b/website/pages/commands/acl/policy/delete.mdx similarity index 98% rename from website/pages/docs/commands/acl/policy/delete.mdx rename to website/pages/commands/acl/policy/delete.mdx index 8c6af9644..039f87d52 100644 --- a/website/pages/docs/commands/acl/policy/delete.mdx +++ b/website/pages/commands/acl/policy/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/acl/policy/index.mdx b/website/pages/commands/acl/policy/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/policy/index.mdx rename to website/pages/commands/acl/policy/index.mdx index 2acf08d24..e153e2104 100644 --- a/website/pages/docs/commands/acl/policy/index.mdx +++ b/website/pages/commands/acl/policy/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy' sidebar_title: policy --- diff --git 
a/website/pages/docs/commands/acl/policy/list.mdx b/website/pages/commands/acl/policy/list.mdx similarity index 99% rename from website/pages/docs/commands/acl/policy/list.mdx rename to website/pages/commands/acl/policy/list.mdx index c29d2c125..d1da9799a 100644 --- a/website/pages/docs/commands/acl/policy/list.mdx +++ b/website/pages/commands/acl/policy/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy List' sidebar_title: list --- diff --git a/website/pages/docs/commands/acl/policy/read.mdx b/website/pages/commands/acl/policy/read.mdx similarity index 99% rename from website/pages/docs/commands/acl/policy/read.mdx rename to website/pages/commands/acl/policy/read.mdx index 75eedb062..cc5243b3c 100644 --- a/website/pages/docs/commands/acl/policy/read.mdx +++ b/website/pages/commands/acl/policy/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy Read' sidebar_title: read --- diff --git a/website/pages/docs/commands/acl/policy/update.mdx b/website/pages/commands/acl/policy/update.mdx similarity index 99% rename from website/pages/docs/commands/acl/policy/update.mdx rename to website/pages/commands/acl/policy/update.mdx index 5b1e9e945..a517e0638 100644 --- a/website/pages/docs/commands/acl/policy/update.mdx +++ b/website/pages/commands/acl/policy/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Policy Update' sidebar_title: update --- diff --git a/website/pages/docs/commands/acl/role/create.mdx b/website/pages/commands/acl/role/create.mdx similarity index 99% rename from website/pages/docs/commands/acl/role/create.mdx rename to website/pages/commands/acl/role/create.mdx index 9124510d8..28cdc167f 100644 --- a/website/pages/docs/commands/acl/role/create.mdx +++ b/website/pages/commands/acl/role/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role Create' sidebar_title: create --- diff --git 
a/website/pages/docs/commands/acl/role/delete.mdx b/website/pages/commands/acl/role/delete.mdx similarity index 98% rename from website/pages/docs/commands/acl/role/delete.mdx rename to website/pages/commands/acl/role/delete.mdx index 0e80cc693..e46258b50 100644 --- a/website/pages/docs/commands/acl/role/delete.mdx +++ b/website/pages/commands/acl/role/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/acl/role/index.mdx b/website/pages/commands/acl/role/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/role/index.mdx rename to website/pages/commands/acl/role/index.mdx index ffa42155f..238202dda 100644 --- a/website/pages/docs/commands/acl/role/index.mdx +++ b/website/pages/commands/acl/role/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role' sidebar_title: role --- diff --git a/website/pages/docs/commands/acl/role/list.mdx b/website/pages/commands/acl/role/list.mdx similarity index 99% rename from website/pages/docs/commands/acl/role/list.mdx rename to website/pages/commands/acl/role/list.mdx index b3dddc633..ddfc0ac21 100644 --- a/website/pages/docs/commands/acl/role/list.mdx +++ b/website/pages/commands/acl/role/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role List' sidebar_title: list --- diff --git a/website/pages/docs/commands/acl/role/read.mdx b/website/pages/commands/acl/role/read.mdx similarity index 98% rename from website/pages/docs/commands/acl/role/read.mdx rename to website/pages/commands/acl/role/read.mdx index b6c625c37..d9002e122 100644 --- a/website/pages/docs/commands/acl/role/read.mdx +++ b/website/pages/commands/acl/role/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role Read' sidebar_title: read --- diff --git a/website/pages/docs/commands/acl/role/update.mdx 
b/website/pages/commands/acl/role/update.mdx similarity index 99% rename from website/pages/docs/commands/acl/role/update.mdx rename to website/pages/commands/acl/role/update.mdx index b40b93c5b..c59dc1544 100644 --- a/website/pages/docs/commands/acl/role/update.mdx +++ b/website/pages/commands/acl/role/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Role Update' sidebar_title: update --- diff --git a/website/pages/docs/commands/acl/set-agent-token.mdx b/website/pages/commands/acl/set-agent-token.mdx similarity index 98% rename from website/pages/docs/commands/acl/set-agent-token.mdx rename to website/pages/commands/acl/set-agent-token.mdx index bc6967adb..cb07fda30 100644 --- a/website/pages/docs/commands/acl/set-agent-token.mdx +++ b/website/pages/commands/acl/set-agent-token.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Set Agent Token' sidebar_title: set-agent-token --- diff --git a/website/pages/docs/commands/acl/token/clone.mdx b/website/pages/commands/acl/token/clone.mdx similarity index 98% rename from website/pages/docs/commands/acl/token/clone.mdx rename to website/pages/commands/acl/token/clone.mdx index 4841d3aec..88397a300 100644 --- a/website/pages/docs/commands/acl/token/clone.mdx +++ b/website/pages/commands/acl/token/clone.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token Clone' sidebar_title: clone --- diff --git a/website/pages/docs/commands/acl/token/create.mdx b/website/pages/commands/acl/token/create.mdx similarity index 99% rename from website/pages/docs/commands/acl/token/create.mdx rename to website/pages/commands/acl/token/create.mdx index a2533787e..70999b2aa 100644 --- a/website/pages/docs/commands/acl/token/create.mdx +++ b/website/pages/commands/acl/token/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token Create' sidebar_title: create --- diff --git 
a/website/pages/docs/commands/acl/token/delete.mdx b/website/pages/commands/acl/token/delete.mdx similarity index 97% rename from website/pages/docs/commands/acl/token/delete.mdx rename to website/pages/commands/acl/token/delete.mdx index 4e7f55b24..9772cc8b2 100644 --- a/website/pages/docs/commands/acl/token/delete.mdx +++ b/website/pages/commands/acl/token/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/acl/token/index.mdx b/website/pages/commands/acl/token/index.mdx similarity index 99% rename from website/pages/docs/commands/acl/token/index.mdx rename to website/pages/commands/acl/token/index.mdx index a7dd82628..2c28c1ab1 100644 --- a/website/pages/docs/commands/acl/token/index.mdx +++ b/website/pages/commands/acl/token/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token' sidebar_title: token --- diff --git a/website/pages/docs/commands/acl/token/list.mdx b/website/pages/commands/acl/token/list.mdx similarity index 99% rename from website/pages/docs/commands/acl/token/list.mdx rename to website/pages/commands/acl/token/list.mdx index dd4a41ead..a43ac075b 100644 --- a/website/pages/docs/commands/acl/token/list.mdx +++ b/website/pages/commands/acl/token/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token List' sidebar_title: list --- diff --git a/website/pages/docs/commands/acl/token/read.mdx b/website/pages/commands/acl/token/read.mdx similarity index 99% rename from website/pages/docs/commands/acl/token/read.mdx rename to website/pages/commands/acl/token/read.mdx index c31b3a941..4cfa07021 100644 --- a/website/pages/docs/commands/acl/token/read.mdx +++ b/website/pages/commands/acl/token/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token Read' sidebar_title: read --- diff --git 
a/website/pages/docs/commands/acl/token/update.mdx b/website/pages/commands/acl/token/update.mdx similarity index 96% rename from website/pages/docs/commands/acl/token/update.mdx rename to website/pages/commands/acl/token/update.mdx index c429d091e..db48904b3 100644 --- a/website/pages/docs/commands/acl/token/update.mdx +++ b/website/pages/commands/acl/token/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Token Update' sidebar_title: update --- @@ -56,8 +56,8 @@ Usage: `consul acl token update [options]` specified grant equivalent or appropriate access for the existing clients using this token. You can find examples on how to use the parameter in the [legacy token -migration](https://learn.hashicorp.com/tutorials/consul/access-control-token-migration) -tutorial. +migration](https://learn.hashicorp.com/consul/day-2-agent-authentication/migrate-acl-tokens) +guide. - `-format={pretty|json}` - Command output format. The default value is `pretty`. diff --git a/website/pages/docs/commands/acl/translate-rules.mdx b/website/pages/commands/acl/translate-rules.mdx similarity index 99% rename from website/pages/docs/commands/acl/translate-rules.mdx rename to website/pages/commands/acl/translate-rules.mdx index 3aea79adc..01192a2b1 100644 --- a/website/pages/docs/commands/acl/translate-rules.mdx +++ b/website/pages/commands/acl/translate-rules.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: ACL Translate Rules' sidebar_title: translate-rules --- diff --git a/website/pages/docs/commands/agent.mdx b/website/pages/commands/agent.mdx similarity index 97% rename from website/pages/docs/commands/agent.mdx rename to website/pages/commands/agent.mdx index 90458ff7c..3cf0123a9 100644 --- a/website/pages/docs/commands/agent.mdx +++ b/website/pages/commands/agent.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Agent' sidebar_title: agent description: >- diff --git 
a/website/pages/docs/commands/catalog/datacenters.mdx b/website/pages/commands/catalog/datacenters.mdx similarity index 96% rename from website/pages/docs/commands/catalog/datacenters.mdx rename to website/pages/commands/catalog/datacenters.mdx index ef2466652..1d2cd5a6d 100644 --- a/website/pages/docs/commands/catalog/datacenters.mdx +++ b/website/pages/commands/catalog/datacenters.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Catalog List Datacenters' sidebar_title: datacenters --- diff --git a/website/pages/docs/commands/catalog/index.mdx b/website/pages/commands/catalog/index.mdx similarity index 99% rename from website/pages/docs/commands/catalog/index.mdx rename to website/pages/commands/catalog/index.mdx index a6e83c395..925d3211e 100644 --- a/website/pages/docs/commands/catalog/index.mdx +++ b/website/pages/commands/catalog/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Catalog' sidebar_title: catalog --- diff --git a/website/pages/docs/commands/catalog/nodes.mdx b/website/pages/commands/catalog/nodes.mdx similarity index 99% rename from website/pages/docs/commands/catalog/nodes.mdx rename to website/pages/commands/catalog/nodes.mdx index 55044ce37..f07ddab28 100644 --- a/website/pages/docs/commands/catalog/nodes.mdx +++ b/website/pages/commands/catalog/nodes.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Catalog List Nodes' sidebar_title: nodes --- diff --git a/website/pages/docs/commands/catalog/services.mdx b/website/pages/commands/catalog/services.mdx similarity index 98% rename from website/pages/docs/commands/catalog/services.mdx rename to website/pages/commands/catalog/services.mdx index 4173b802b..ca2192080 100644 --- a/website/pages/docs/commands/catalog/services.mdx +++ b/website/pages/commands/catalog/services.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Catalog List Services' sidebar_title: services --- diff 
--git a/website/pages/docs/commands/config/delete.mdx b/website/pages/commands/config/delete.mdx similarity index 97% rename from website/pages/docs/commands/config/delete.mdx rename to website/pages/commands/config/delete.mdx index 442c511fa..c897d0340 100644 --- a/website/pages/docs/commands/config/delete.mdx +++ b/website/pages/commands/config/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Config Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/config/index.mdx b/website/pages/commands/config/index.mdx similarity index 98% rename from website/pages/docs/commands/config/index.mdx rename to website/pages/commands/config/index.mdx index 4a3960470..6abbc1ceb 100644 --- a/website/pages/docs/commands/config/index.mdx +++ b/website/pages/commands/config/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Config' sidebar_title: config --- diff --git a/website/pages/docs/commands/config/list.mdx b/website/pages/commands/config/list.mdx similarity index 97% rename from website/pages/docs/commands/config/list.mdx rename to website/pages/commands/config/list.mdx index 3378dc051..77670ff56 100644 --- a/website/pages/docs/commands/config/list.mdx +++ b/website/pages/commands/config/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Config List' sidebar_title: list --- diff --git a/website/pages/docs/commands/config/read.mdx b/website/pages/commands/config/read.mdx similarity index 98% rename from website/pages/docs/commands/config/read.mdx rename to website/pages/commands/config/read.mdx index 19b1a656b..a36c59904 100644 --- a/website/pages/docs/commands/config/read.mdx +++ b/website/pages/commands/config/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Config Read' sidebar_title: read --- diff --git a/website/pages/docs/commands/config/write.mdx b/website/pages/commands/config/write.mdx similarity index 99% 
rename from website/pages/docs/commands/config/write.mdx rename to website/pages/commands/config/write.mdx index bf4a91544..034bd75dd 100644 --- a/website/pages/docs/commands/config/write.mdx +++ b/website/pages/commands/config/write.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Config Write' sidebar_title: write --- diff --git a/website/pages/docs/commands/connect/ca.mdx b/website/pages/commands/connect/ca.mdx similarity index 99% rename from website/pages/docs/commands/connect/ca.mdx rename to website/pages/commands/connect/ca.mdx index 412c10b88..8d8246684 100644 --- a/website/pages/docs/commands/connect/ca.mdx +++ b/website/pages/commands/connect/ca.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Connect CA' sidebar_title: ca description: > diff --git a/website/pages/docs/commands/connect/envoy.mdx b/website/pages/commands/connect/envoy.mdx similarity index 99% rename from website/pages/docs/commands/connect/envoy.mdx rename to website/pages/commands/connect/envoy.mdx index 142765ac0..700f06ded 100644 --- a/website/pages/docs/commands/connect/envoy.mdx +++ b/website/pages/commands/connect/envoy.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Connect Proxy' sidebar_title: envoy description: > diff --git a/website/pages/docs/commands/connect/expose.mdx b/website/pages/commands/connect/expose.mdx similarity index 99% rename from website/pages/docs/commands/connect/expose.mdx rename to website/pages/commands/connect/expose.mdx index 08237ac61..a590517bf 100644 --- a/website/pages/docs/commands/connect/expose.mdx +++ b/website/pages/commands/connect/expose.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Connect Expose' sidebar_title: expose description: > diff --git a/website/pages/docs/commands/connect/index.mdx b/website/pages/commands/connect/index.mdx similarity index 98% rename from website/pages/docs/commands/connect/index.mdx rename to 
website/pages/commands/connect/index.mdx index 63b15c800..31fb952ea 100644 --- a/website/pages/docs/commands/connect/index.mdx +++ b/website/pages/commands/connect/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Connect' sidebar_title: connect --- diff --git a/website/pages/docs/commands/connect/proxy.mdx b/website/pages/commands/connect/proxy.mdx similarity index 99% rename from website/pages/docs/commands/connect/proxy.mdx rename to website/pages/commands/connect/proxy.mdx index 404cd048d..b5c644dc0 100644 --- a/website/pages/docs/commands/connect/proxy.mdx +++ b/website/pages/commands/connect/proxy.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Connect Proxy' sidebar_title: proxy description: > diff --git a/website/pages/docs/commands/debug.mdx b/website/pages/commands/debug.mdx similarity index 99% rename from website/pages/docs/commands/debug.mdx rename to website/pages/commands/debug.mdx index 4a93b467f..9c2bad096 100644 --- a/website/pages/docs/commands/debug.mdx +++ b/website/pages/commands/debug.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Debug' sidebar_title: debug --- diff --git a/website/pages/docs/commands/event.mdx b/website/pages/commands/event.mdx similarity index 99% rename from website/pages/docs/commands/event.mdx rename to website/pages/commands/event.mdx index eb60dfbd1..a0453b6f4 100644 --- a/website/pages/docs/commands/event.mdx +++ b/website/pages/commands/event.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Event' sidebar_title: event description: >- diff --git a/website/pages/docs/commands/exec.mdx b/website/pages/commands/exec.mdx similarity index 99% rename from website/pages/docs/commands/exec.mdx rename to website/pages/commands/exec.mdx index 8403c2b9d..97154f2fc 100644 --- a/website/pages/docs/commands/exec.mdx +++ b/website/pages/commands/exec.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands 
page_title: 'Commands: Exec' sidebar_title: exec description: >- diff --git a/website/pages/docs/commands/force-leave.mdx b/website/pages/commands/force-leave.mdx similarity index 95% rename from website/pages/docs/commands/force-leave.mdx rename to website/pages/commands/force-leave.mdx index cc11f3bb6..f676368fa 100644 --- a/website/pages/docs/commands/force-leave.mdx +++ b/website/pages/commands/force-leave.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Force Leave' sidebar_title: force-leave description: >- @@ -48,7 +48,7 @@ consul force-leave ec2-001-staging ``` When run on a server that is part of a -[WAN gossip pool](https://learn.hashicorp.com/tutorials/consul/federarion-gossip-wan), +[WAN gossip pool](https://learn.hashicorp.com/consul/security-networking/datacenters), `force-leave` can remove failed servers in other datacenters from the WAN pool. The identifying node-name in a WAN pool is `[node-name].[datacenter]`. diff --git a/website/pages/docs/commands/index.mdx b/website/pages/commands/index.mdx similarity index 99% rename from website/pages/docs/commands/index.mdx rename to website/pages/commands/index.mdx index f00eca7d0..03d5b6844 100644 --- a/website/pages/docs/commands/index.mdx +++ b/website/pages/commands/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: Commands sidebar_title: Commands (CLI) description: >- diff --git a/website/pages/docs/commands/info.mdx b/website/pages/commands/info.mdx similarity index 99% rename from website/pages/docs/commands/info.mdx rename to website/pages/commands/info.mdx index fd6fa1290..24618ae27 100644 --- a/website/pages/docs/commands/info.mdx +++ b/website/pages/commands/info.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Info' sidebar_title: info description: >- diff --git a/website/pages/docs/commands/intention/check.mdx b/website/pages/commands/intention/check.mdx similarity index 98% rename from 
website/pages/docs/commands/intention/check.mdx rename to website/pages/commands/intention/check.mdx index 263d782ab..d49609154 100644 --- a/website/pages/docs/commands/intention/check.mdx +++ b/website/pages/commands/intention/check.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention Check' sidebar_title: check --- diff --git a/website/pages/docs/commands/intention/create.mdx b/website/pages/commands/intention/create.mdx similarity index 98% rename from website/pages/docs/commands/intention/create.mdx rename to website/pages/commands/intention/create.mdx index efb65040d..3cc698d44 100644 --- a/website/pages/docs/commands/intention/create.mdx +++ b/website/pages/commands/intention/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention Create' sidebar_title: create --- diff --git a/website/pages/docs/commands/intention/delete.mdx b/website/pages/commands/intention/delete.mdx similarity index 97% rename from website/pages/docs/commands/intention/delete.mdx rename to website/pages/commands/intention/delete.mdx index 9bf1f1aa0..2a5ce032a 100644 --- a/website/pages/docs/commands/intention/delete.mdx +++ b/website/pages/commands/intention/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/intention/get.mdx b/website/pages/commands/intention/get.mdx similarity index 97% rename from website/pages/docs/commands/intention/get.mdx rename to website/pages/commands/intention/get.mdx index 570eb7243..c961fc580 100644 --- a/website/pages/docs/commands/intention/get.mdx +++ b/website/pages/commands/intention/get.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention Get' sidebar_title: get --- diff --git a/website/pages/docs/commands/intention/index.mdx b/website/pages/commands/intention/index.mdx similarity index 99% rename from 
website/pages/docs/commands/intention/index.mdx rename to website/pages/commands/intention/index.mdx index 922f59b7e..c26a26926 100644 --- a/website/pages/docs/commands/intention/index.mdx +++ b/website/pages/commands/intention/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention' sidebar_title: intention --- diff --git a/website/pages/docs/commands/intention/match.mdx b/website/pages/commands/intention/match.mdx similarity index 98% rename from website/pages/docs/commands/intention/match.mdx rename to website/pages/commands/intention/match.mdx index 12edbc0ee..b94b56c76 100644 --- a/website/pages/docs/commands/intention/match.mdx +++ b/website/pages/commands/intention/match.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Intention Match' sidebar_title: match --- diff --git a/website/pages/docs/commands/join.mdx b/website/pages/commands/join.mdx similarity index 98% rename from website/pages/docs/commands/join.mdx rename to website/pages/commands/join.mdx index a04847cd0..c2c8e295e 100644 --- a/website/pages/docs/commands/join.mdx +++ b/website/pages/commands/join.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Join' sidebar_title: join description: >- diff --git a/website/pages/docs/commands/keygen.mdx b/website/pages/commands/keygen.mdx similarity index 97% rename from website/pages/docs/commands/keygen.mdx rename to website/pages/commands/keygen.mdx index 02976065c..0a48ff909 100644 --- a/website/pages/docs/commands/keygen.mdx +++ b/website/pages/commands/keygen.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Keygen' sidebar_title: keygen description: >- diff --git a/website/pages/docs/commands/keyring.mdx b/website/pages/commands/keyring.mdx similarity index 99% rename from website/pages/docs/commands/keyring.mdx rename to website/pages/commands/keyring.mdx index 5908c4a13..cc61b619a 100644 --- 
a/website/pages/docs/commands/keyring.mdx +++ b/website/pages/commands/keyring.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Keyring' sidebar_title: keyring --- diff --git a/website/pages/docs/commands/kv/delete.mdx b/website/pages/commands/kv/delete.mdx similarity index 99% rename from website/pages/docs/commands/kv/delete.mdx rename to website/pages/commands/kv/delete.mdx index 997fcf14a..52953dc2e 100644 --- a/website/pages/docs/commands/kv/delete.mdx +++ b/website/pages/commands/kv/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/kv/export.mdx b/website/pages/commands/kv/export.mdx similarity index 97% rename from website/pages/docs/commands/kv/export.mdx rename to website/pages/commands/kv/export.mdx index 48053a14c..a48a736fc 100644 --- a/website/pages/docs/commands/kv/export.mdx +++ b/website/pages/commands/kv/export.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV Export' sidebar_title: export --- diff --git a/website/pages/docs/commands/kv/get.mdx b/website/pages/commands/kv/get.mdx similarity index 99% rename from website/pages/docs/commands/kv/get.mdx rename to website/pages/commands/kv/get.mdx index c748d23d8..288fa38e3 100644 --- a/website/pages/docs/commands/kv/get.mdx +++ b/website/pages/commands/kv/get.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV Get' sidebar_title: get --- diff --git a/website/pages/docs/commands/kv/import.mdx b/website/pages/commands/kv/import.mdx similarity index 98% rename from website/pages/docs/commands/kv/import.mdx rename to website/pages/commands/kv/import.mdx index 02d2a77a0..3e6dff10d 100644 --- a/website/pages/docs/commands/kv/import.mdx +++ b/website/pages/commands/kv/import.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV Import' sidebar_title: import --- diff --git 
a/website/pages/docs/commands/kv/index.mdx b/website/pages/commands/kv/index.mdx similarity index 99% rename from website/pages/docs/commands/kv/index.mdx rename to website/pages/commands/kv/index.mdx index 09a1ebdd2..ece885932 100644 --- a/website/pages/docs/commands/kv/index.mdx +++ b/website/pages/commands/kv/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV' sidebar_title: kv --- diff --git a/website/pages/docs/commands/kv/put.mdx b/website/pages/commands/kv/put.mdx similarity index 99% rename from website/pages/docs/commands/kv/put.mdx rename to website/pages/commands/kv/put.mdx index 62007b18a..50093e88f 100644 --- a/website/pages/docs/commands/kv/put.mdx +++ b/website/pages/commands/kv/put.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: KV Put' sidebar_title: put --- diff --git a/website/pages/docs/commands/leave.mdx b/website/pages/commands/leave.mdx similarity index 98% rename from website/pages/docs/commands/leave.mdx rename to website/pages/commands/leave.mdx index 077c3a9d2..dd946e62c 100644 --- a/website/pages/docs/commands/leave.mdx +++ b/website/pages/commands/leave.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Leave' sidebar_title: leave description: >- diff --git a/website/pages/docs/commands/license.mdx b/website/pages/commands/license.mdx similarity index 95% rename from website/pages/docs/commands/license.mdx rename to website/pages/commands/license.mdx index 1c21b4a0c..eaa0cabf4 100644 --- a/website/pages/docs/commands/license.mdx +++ b/website/pages/commands/license.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: License' sidebar_title: license description: > @@ -17,8 +17,8 @@ The `license` command provides datacenter-level management of the Consul Enterpr If ACLs are enabled then a token with operator privileges may be required in order to use this command. 
Requests are forwarded internally to the leader -if required, so this can be run from any Consul node in a cluster. Review the -[ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more information. +if required, so this can be run from any Consul node in a cluster. See the +[ACL Guide](https://learn.hashicorp.com/consul/security-networking/production-acls) for more information. ```text Usage: consul license [options] [args] diff --git a/website/pages/docs/commands/lock.mdx b/website/pages/commands/lock.mdx similarity index 97% rename from website/pages/docs/commands/lock.mdx rename to website/pages/commands/lock.mdx index eae70bdaa..d25b1fe9e 100644 --- a/website/pages/docs/commands/lock.mdx +++ b/website/pages/commands/lock.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Lock' sidebar_title: lock description: >- @@ -19,11 +19,11 @@ communication is disrupted, the child process is terminated. The number of lock holders is configurable with the `-n` flag. By default, a single holder is allowed, and a lock is used for mutual exclusion. This -uses the [leader election algorithm](https://learn.hashicorp.com/tutorials/consul/application-leader-elections). +uses the [leader election algorithm](https://learn.hashicorp.com/consul/developer-configuration/elections). If the lock holder count is more than one, then a semaphore is used instead. A semaphore allows more than a single holder, but this is less efficient than -a simple lock. This follows the [semaphore algorithm](https://learn.hashicorp.com/tutorials/consul/distributed-semaphore). +a simple lock. This follows the [semaphore algorithm](https://learn.hashicorp.com/consul/developer-configuration/semaphore). All locks using the same prefix must agree on the value of `-n`. If conflicting values of `-n` are provided, an error will be returned. 
diff --git a/website/pages/docs/commands/login.mdx b/website/pages/commands/login.mdx similarity index 99% rename from website/pages/docs/commands/login.mdx rename to website/pages/commands/login.mdx index b67a66b9c..6a9804f9a 100644 --- a/website/pages/docs/commands/login.mdx +++ b/website/pages/commands/login.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Login' sidebar_title: login description: > diff --git a/website/pages/docs/commands/logout.mdx b/website/pages/commands/logout.mdx similarity index 97% rename from website/pages/docs/commands/logout.mdx rename to website/pages/commands/logout.mdx index a1c9a8210..4a60a938d 100644 --- a/website/pages/docs/commands/logout.mdx +++ b/website/pages/commands/logout.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Logout' sidebar_title: logout description: > diff --git a/website/pages/docs/commands/maint.mdx b/website/pages/commands/maint.mdx similarity index 99% rename from website/pages/docs/commands/maint.mdx rename to website/pages/commands/maint.mdx index ad73614b3..f260d19b3 100644 --- a/website/pages/docs/commands/maint.mdx +++ b/website/pages/commands/maint.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Maint' sidebar_title: maint description: | diff --git a/website/pages/docs/commands/members.mdx b/website/pages/commands/members.mdx similarity index 98% rename from website/pages/docs/commands/members.mdx rename to website/pages/commands/members.mdx index 190831c86..b14144146 100644 --- a/website/pages/docs/commands/members.mdx +++ b/website/pages/commands/members.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Members' sidebar_title: members description: >- diff --git a/website/pages/docs/commands/monitor.mdx b/website/pages/commands/monitor.mdx similarity index 98% rename from website/pages/docs/commands/monitor.mdx rename to website/pages/commands/monitor.mdx index 8b06a7acd..5d30f52ed 
100644 --- a/website/pages/docs/commands/monitor.mdx +++ b/website/pages/commands/monitor.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Monitor' sidebar_title: monitor description: >- diff --git a/website/pages/docs/commands/namespace/create.mdx b/website/pages/commands/namespace/create.mdx similarity index 99% rename from website/pages/docs/commands/namespace/create.mdx rename to website/pages/commands/namespace/create.mdx index 125e66cf6..88b6a802d 100644 --- a/website/pages/docs/commands/namespace/create.mdx +++ b/website/pages/commands/namespace/create.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace Create' sidebar_title: create --- diff --git a/website/pages/docs/commands/namespace/delete.mdx b/website/pages/commands/namespace/delete.mdx similarity index 97% rename from website/pages/docs/commands/namespace/delete.mdx rename to website/pages/commands/namespace/delete.mdx index ef8b305c5..8102f3c39 100644 --- a/website/pages/docs/commands/namespace/delete.mdx +++ b/website/pages/commands/namespace/delete.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace Delete' sidebar_title: delete --- diff --git a/website/pages/docs/commands/namespace/index.mdx b/website/pages/commands/namespace/index.mdx similarity index 99% rename from website/pages/docs/commands/namespace/index.mdx rename to website/pages/commands/namespace/index.mdx index 8a4b03c25..dd2f34a85 100644 --- a/website/pages/docs/commands/namespace/index.mdx +++ b/website/pages/commands/namespace/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace' sidebar_title: namespace description: | diff --git a/website/pages/docs/commands/namespace/list.mdx b/website/pages/commands/namespace/list.mdx similarity index 99% rename from website/pages/docs/commands/namespace/list.mdx rename to website/pages/commands/namespace/list.mdx index e952db58f..fbb82ac44 100644 --- 
a/website/pages/docs/commands/namespace/list.mdx +++ b/website/pages/commands/namespace/list.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace List' sidebar_title: list --- diff --git a/website/pages/docs/commands/namespace/read.mdx b/website/pages/commands/namespace/read.mdx similarity index 98% rename from website/pages/docs/commands/namespace/read.mdx rename to website/pages/commands/namespace/read.mdx index d45a0aa82..f0de95502 100644 --- a/website/pages/docs/commands/namespace/read.mdx +++ b/website/pages/commands/namespace/read.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace Read' sidebar_title: read --- diff --git a/website/pages/docs/commands/namespace/update.mdx b/website/pages/commands/namespace/update.mdx similarity index 99% rename from website/pages/docs/commands/namespace/update.mdx rename to website/pages/commands/namespace/update.mdx index 30d8ad671..73187e31f 100644 --- a/website/pages/docs/commands/namespace/update.mdx +++ b/website/pages/commands/namespace/update.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace Update' sidebar_title: update --- diff --git a/website/pages/docs/commands/namespace/write.mdx b/website/pages/commands/namespace/write.mdx similarity index 98% rename from website/pages/docs/commands/namespace/write.mdx rename to website/pages/commands/namespace/write.mdx index 057ac3b87..f2b6e5b42 100644 --- a/website/pages/docs/commands/namespace/write.mdx +++ b/website/pages/commands/namespace/write.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Namespace Write' sidebar_title: write --- diff --git a/website/pages/docs/commands/operator/area.mdx b/website/pages/commands/operator/area.mdx similarity index 98% rename from website/pages/docs/commands/operator/area.mdx rename to website/pages/commands/operator/area.mdx index 68533a5ba..f8b137d95 100644 --- 
a/website/pages/docs/commands/operator/area.mdx +++ b/website/pages/commands/operator/area.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Operator Area' sidebar_title: area description: > @@ -22,7 +22,7 @@ and relationships can be made between independent pairs of datacenters, so not a need to be fully connected. This allows for complex topologies among Consul datacenters like hub/spoke and more general trees. -Review the [Network Areas tutorial](https://learn.hashicorp.com/tutorials/consul/federation-network-areas) for more details. +See the [Network Areas Guide](https://learn.hashicorp.com/consul/day-2-operations/advanced-federation) for more details. ```text Usage: consul operator area [options] diff --git a/website/pages/docs/commands/operator/autopilot.mdx b/website/pages/commands/operator/autopilot.mdx similarity index 94% rename from website/pages/docs/commands/operator/autopilot.mdx rename to website/pages/commands/operator/autopilot.mdx index e350eafb5..44977dc80 100644 --- a/website/pages/docs/commands/operator/autopilot.mdx +++ b/website/pages/commands/operator/autopilot.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Operator Autopilot' sidebar_title: autopilot description: > @@ -12,8 +12,8 @@ description: > Command: `consul operator autopilot` The Autopilot operator command is used to interact with Consul's Autopilot subsystem. The -command can be used to view or modify the current Autopilot configuration. Review the -[Autopilot tutorial](https://learn.hashicorp.com/tutorials/consul/autopilot-datacenter-operations) for more information about Autopilot. +command can be used to view or modify the current Autopilot configuration. See the +[Autopilot Guide](https://learn.hashicorp.com/consul/day-2-operations/autopilot) for more information about Autopilot. 
```text Usage: consul operator autopilot [options] diff --git a/website/pages/docs/commands/operator/index.mdx b/website/pages/commands/operator/index.mdx similarity index 80% rename from website/pages/docs/commands/operator/index.mdx rename to website/pages/commands/operator/index.mdx index 34226ed35..9b89e7322 100644 --- a/website/pages/docs/commands/operator/index.mdx +++ b/website/pages/commands/operator/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Operator' sidebar_title: operator description: | @@ -18,12 +18,12 @@ outage and even loss of data. If ACLs are enabled then a token with operator privileges may be required in order to use this command. Requests are forwarded internally to the leader -if required, so this can be run from any Consul node in a cluster. Review the -[ACL tutorial](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) for more information. +if required, so this can be run from any Consul node in a cluster. See the +[ACL Guide](https://learn.hashicorp.com/consul/security-networking/production-acls) for more information. -Review the [Outage Recovery](https://learn.hashicorp.com/tutorials/consul/recovery-outage) tutorial for some examples of how +See the [Outage Recovery](https://learn.hashicorp.com/consul/day-2-operations/outage) guide for some examples of how this command is used. For an API to perform these operations programmatically, -please check the documentation for the [Operator](/api/operator) +please see the documentation for the [Operator](/api/operator) endpoint. 
## Usage diff --git a/website/pages/docs/commands/operator/raft.mdx b/website/pages/commands/operator/raft.mdx similarity index 99% rename from website/pages/docs/commands/operator/raft.mdx rename to website/pages/commands/operator/raft.mdx index 0e1236fbb..8c1729731 100644 --- a/website/pages/docs/commands/operator/raft.mdx +++ b/website/pages/commands/operator/raft.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Operator Raft' sidebar_title: raft description: > diff --git a/website/pages/docs/commands/reload.mdx b/website/pages/commands/reload.mdx similarity index 98% rename from website/pages/docs/commands/reload.mdx rename to website/pages/commands/reload.mdx index 6d75826cf..ddd036d42 100644 --- a/website/pages/docs/commands/reload.mdx +++ b/website/pages/commands/reload.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Reload' sidebar_title: reload description: The `reload` command triggers a reload of configuration files for the agent. 
diff --git a/website/pages/docs/commands/rtt.mdx b/website/pages/commands/rtt.mdx similarity index 99% rename from website/pages/docs/commands/rtt.mdx rename to website/pages/commands/rtt.mdx index d7728e5dc..e990658d1 100644 --- a/website/pages/docs/commands/rtt.mdx +++ b/website/pages/commands/rtt.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: RTT' sidebar_title: rtt description: | diff --git a/website/pages/docs/commands/services/deregister.mdx b/website/pages/commands/services/deregister.mdx similarity index 99% rename from website/pages/docs/commands/services/deregister.mdx rename to website/pages/commands/services/deregister.mdx index b15addf61..35df89dd4 100644 --- a/website/pages/docs/commands/services/deregister.mdx +++ b/website/pages/commands/services/deregister.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Services Deregister' sidebar_title: deregister --- diff --git a/website/pages/docs/commands/services/index.mdx b/website/pages/commands/services/index.mdx similarity index 98% rename from website/pages/docs/commands/services/index.mdx rename to website/pages/commands/services/index.mdx index 6383304fb..13f41bca0 100644 --- a/website/pages/docs/commands/services/index.mdx +++ b/website/pages/commands/services/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Services' sidebar_title: services --- diff --git a/website/pages/docs/commands/services/register.mdx b/website/pages/commands/services/register.mdx similarity index 99% rename from website/pages/docs/commands/services/register.mdx rename to website/pages/commands/services/register.mdx index 933f2ebb9..3c38bba6d 100644 --- a/website/pages/docs/commands/services/register.mdx +++ b/website/pages/commands/services/register.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Services Register' sidebar_title: register --- diff --git 
a/website/pages/docs/commands/snapshot/agent.mdx b/website/pages/commands/snapshot/agent.mdx similarity index 99% rename from website/pages/docs/commands/snapshot/agent.mdx rename to website/pages/commands/snapshot/agent.mdx index 24d89a6ec..284738aba 100644 --- a/website/pages/docs/commands/snapshot/agent.mdx +++ b/website/pages/commands/snapshot/agent.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Snapshot Agent' sidebar_title: agent --- diff --git a/website/pages/docs/commands/snapshot/index.mdx b/website/pages/commands/snapshot/index.mdx similarity index 99% rename from website/pages/docs/commands/snapshot/index.mdx rename to website/pages/commands/snapshot/index.mdx index ee67f32c7..0abe51af3 100644 --- a/website/pages/docs/commands/snapshot/index.mdx +++ b/website/pages/commands/snapshot/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Snapshot' sidebar_title: snapshot --- diff --git a/website/pages/docs/commands/snapshot/inspect.mdx b/website/pages/commands/snapshot/inspect.mdx similarity index 98% rename from website/pages/docs/commands/snapshot/inspect.mdx rename to website/pages/commands/snapshot/inspect.mdx index 8b93b6a3b..474303140 100644 --- a/website/pages/docs/commands/snapshot/inspect.mdx +++ b/website/pages/commands/snapshot/inspect.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Snapshot Inspect' sidebar_title: inspect --- diff --git a/website/pages/docs/commands/snapshot/restore.mdx b/website/pages/commands/snapshot/restore.mdx similarity index 98% rename from website/pages/docs/commands/snapshot/restore.mdx rename to website/pages/commands/snapshot/restore.mdx index ad45851bf..8bdae7041 100644 --- a/website/pages/docs/commands/snapshot/restore.mdx +++ b/website/pages/commands/snapshot/restore.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Snapshot Restore' sidebar_title: restore --- diff --git 
a/website/pages/docs/commands/snapshot/save.mdx b/website/pages/commands/snapshot/save.mdx similarity index 98% rename from website/pages/docs/commands/snapshot/save.mdx rename to website/pages/commands/snapshot/save.mdx index 6587df12c..5c3e24daa 100644 --- a/website/pages/docs/commands/snapshot/save.mdx +++ b/website/pages/commands/snapshot/save.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Snapshot Save' sidebar_title: save --- diff --git a/website/pages/docs/commands/tls/ca.mdx b/website/pages/commands/tls/ca.mdx similarity index 98% rename from website/pages/docs/commands/tls/ca.mdx rename to website/pages/commands/tls/ca.mdx index d6db7486b..3ac9a8f66 100644 --- a/website/pages/docs/commands/tls/ca.mdx +++ b/website/pages/commands/tls/ca.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: TLS CA Create' sidebar_title: ca --- diff --git a/website/pages/docs/commands/tls/cert.mdx b/website/pages/commands/tls/cert.mdx similarity index 99% rename from website/pages/docs/commands/tls/cert.mdx rename to website/pages/commands/tls/cert.mdx index fd266e883..328f0616f 100644 --- a/website/pages/docs/commands/tls/cert.mdx +++ b/website/pages/commands/tls/cert.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: TLS Cert Create' sidebar_title: cert --- diff --git a/website/pages/docs/commands/tls/index.mdx b/website/pages/commands/tls/index.mdx similarity index 98% rename from website/pages/docs/commands/tls/index.mdx rename to website/pages/commands/tls/index.mdx index 7db66107d..d69698877 100644 --- a/website/pages/docs/commands/tls/index.mdx +++ b/website/pages/commands/tls/index.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: TLS' sidebar_title: tls --- diff --git a/website/pages/docs/commands/validate.mdx b/website/pages/commands/validate.mdx similarity index 98% rename from website/pages/docs/commands/validate.mdx rename to 
website/pages/commands/validate.mdx index 6d72742c9..03ef8c530 100644 --- a/website/pages/docs/commands/validate.mdx +++ b/website/pages/commands/validate.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Validate' sidebar_title: validate description: > diff --git a/website/pages/docs/commands/version.mdx b/website/pages/commands/version.mdx similarity index 98% rename from website/pages/docs/commands/version.mdx rename to website/pages/commands/version.mdx index eae2aa99e..0f6ebf388 100644 --- a/website/pages/docs/commands/version.mdx +++ b/website/pages/commands/version.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Version' sidebar_title: version description: >- diff --git a/website/pages/docs/commands/watch.mdx b/website/pages/commands/watch.mdx similarity index 99% rename from website/pages/docs/commands/watch.mdx rename to website/pages/commands/watch.mdx index 6faf1189b..40312048f 100644 --- a/website/pages/docs/commands/watch.mdx +++ b/website/pages/commands/watch.mdx @@ -1,5 +1,5 @@ --- -layout: docs +layout: commands page_title: 'Commands: Watch' sidebar_title: watch description: >- diff --git a/website/pages/docs/agent/options.mdx b/website/pages/docs/agent/options.mdx index 9b64a6f40..95180f5d1 100644 --- a/website/pages/docs/agent/options.mdx +++ b/website/pages/docs/agent/options.mdx @@ -640,8 +640,8 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." roles. Setting this configuration will will enable ACL token replication and allow for the creation of both [local tokens](/api/acl/tokens#local) and [auth methods](/docs/acl/auth-methods) in connected secondary datacenters. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, global tokens already present in the secondary datacenter will be lost. 
For production environments, consider configuring ACL replication in your initial datacenter bootstrapping process. @@ -687,8 +687,8 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `replication` ((#acl_tokens_replication)) - The ACL token used to authorize secondary datacenters with the primary datacenter for replication operations. This token is required for servers outside the [`primary_datacenter`](#primary_datacenter) when ACLs are enabled. This token may be provided later using the [agent token API](/api/agent#update-acl-tokens) on each server. This token must have at least "read" permissions on ACL data but if ACL token replication is enabled then it must have "write" permissions. This also enables Connect replication, for which the token will require both operator "write" and intention "read" permissions for replicating CA and Intention data. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, policies and roles already present in the secondary datacenter will be lost. For production environments, consider configuring ACL replication in your initial datacenter bootstrapping process. @@ -926,11 +926,11 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `upgrade_version_tag` - The node_meta tag to use for version info when performing upgrade migrations. If this is not set, the Consul version will be used. - + - `auto_config` This object allows setting options for the `auto_config` feature. The following sub-keys are available: - + - `enabled` (Defaults to `false`) This option enables `auto_config` on a client agent. When starting up but before joining the cluster, the client agent will make an RPC to the configured server addresses to request configuration settings, @@ -943,44 +943,44 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." object available for use on Consul servers. 
Enabling this option also turns on Connect because it is vital for `auto_config`, more specifically the CA and certificates infrastructure. - - - `intro_token` (Defaults to `""`) This specifies the JWT to use for the initial - `auto_config` RPC to the Consul servers. This can be overridden with the + + - `intro_token` (Defaults to `""`) This specifies the JWT to use for the initial + `auto_config` RPC to the Consul servers. This can be overridden with the `CONSUL_INTRO_TOKEN` environment variable - + - `intro_token_file` (Defaults to `""`) This specifies a file containing the JWT to use for the initial `auto_config` RPC to the Consul servers. This token from this file is only loaded if the `intro_token` configuration is unset as well as the `CONSUL_INTRO_TOKEN` environment variable - + - `server_addresses` (Defaults to `[]`) This specifies the addresses of servers in the local datacenter to use for the initial RPC. These addresses support [Cloud Auto-Joining](#cloud-auto-joining) and can optionally include a port to use when making the outbound connection. If not port is provided the `server_port` will be used. - + - `dns_sans` (Defaults to `[]`) This is a list of extra DNS SANs to request in the client agent's TLS certificate. The `localhost` DNS SAN is always requested. - + - `ip_sans` (Defaults to `[]`) This is a list of extra IP SANs to request in the client agent's TLS certficate. The `::1` and `127.0.0.1` IP SANs are always requested. - + - `authorization` This object controls how a Consul server will authorize `auto_config` requests and in particular how to verify the JWT intro token. - - - `enabled` (Defaults to `false`) This option enables `auto_config` authorization + + - `enabled` (Defaults to `false`) This option enables `auto_config` authorization capabilities on the server. - + - `static` This object controls configuring the static authorizer setup in the Consul configuration file. 
Almost all sub-keys are identical to those provided by the [JWT Auth Method](/docs/acl/auth-methods/jwt). - + - `jwt_validation_pub_keys` (Defaults to `[]`) A list of PEM-encoded public keys to use to authenticate signatures locally. Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. - - `oidc_discovery_url` (Defaults to `""`) The OIDC Discovery URL, without any + - `oidc_discovery_url` (Defaults to `""`) The OIDC Discovery URL, without any .well-known component (base path). Exactly one of `jwks_url` `jwt_validation_pub_keys`, or `oidc_discovery_url` is required. @@ -998,7 +998,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." (`\n`). If not set, system certificates are used. - `claim_mappings` (Defaults to `(map[string]string)` Mappings of claims (key) that - will be copied to a metadata field (value). Use this if the claim you are capturing + will be copied to a metadata field (value). Use this if the claim you are capturing is singular (such as an attribute). When mapped, the values can be any of a number, string, or boolean and will @@ -1031,13 +1031,14 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `clock_skew_leeway` (Defaults to `"0s"`) Duration of leeway when validating all claims to account for clock skew. Defaults to 60s (1 minute) if set to 0s and can be disabled if set to -1ns. - + - `claim_assertions` (Defaults to []) List of assertions about the mapped claims required to authorize the incoming RPC request. The syntax uses - github.com/hashicorp/go-bexpr which is shared with the + github.com/hashicorp/go-bexpr which is shared with the [API filtering feature](/api/features/filtering). For example, the following configurations when combined will ensure that the JWT `sub` matches the node name requested by the client. + ``` claim_mappings { sub = "node_name" @@ -1046,15 +1047,14 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." 
"value.node_name == \"${node}\"" ] ``` - + The assertions are lightly templated using [HIL syntax](https://github.com/hashicorp/hil) to interpolate some values from the RPC request. The list of variables that can be interpolated are: - - - `node` - The node name the client agent is requesting. - - - `segment` - The network segment name the client is requesting. - + + - `node` - The node name the client agent is requesting. + + - `segment` - The network segment name the client is requesting. - `auto_encrypt` This object allows setting options for the `auto_encrypt` feature. @@ -1414,7 +1414,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." When set to true, in a DNS query for a service, the label between the domain and the `service` label will be treated as a namespace name instead of a datacenter. When set to false, the default, the behavior will be the same as non-Enterprise - versions and will assume the label is the datacenter. See: [this section](/docs/agent/dns#namespaced-services-enterprise) + versions and will assume the label is the datacenter. See: [this section](/docs/agent/dns#namespaced-services) for more details. - `domain` Equivalent to the [`-domain` command-line flag](#_domain). @@ -1423,8 +1423,8 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." the replication token via [`acl_replication_token`](#acl_replication_token). Instead, enable ACL replication and then introduce the token using the [agent token API](/api/agent#update-acl-tokens) on each server. See [`acl_replication_token`](#acl_replication_token) for more details. - - ~> **Warning:** When enabling ACL token replication on the secondary datacenter, + + ~> **Warning:** When enabling ACL token replication on the secondary datacenter, policies and roles already present in the secondary datacenter will be lost. For production environments, consider configuring ACL replication in your initial datacenter bootstrapping process. 
@@ -1648,7 +1648,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." configure Raft to its highest-performance mode, equivalent to the default timing of Consul prior to 0.7, and is recommended for [production Consul servers](/docs/install/performance#production). - See the note on [last contact](/docs/install/performance#last-contact) timing for more + See the note on [last contact](/docs/install/performance#production-server-requirements) timing for more details on tuning this parameter. The maximum allowed value is 10. - `rpc_hold_timeout` - A duration that a client diff --git a/website/pages/docs/internals/anti-entropy.mdx b/website/pages/docs/architecture/anti-entropy.mdx similarity index 100% rename from website/pages/docs/internals/anti-entropy.mdx rename to website/pages/docs/architecture/anti-entropy.mdx diff --git a/website/pages/docs/internals/consensus.mdx b/website/pages/docs/architecture/consensus.mdx similarity index 99% rename from website/pages/docs/internals/consensus.mdx rename to website/pages/docs/architecture/consensus.mdx index 3e16db110..0059406cf 100644 --- a/website/pages/docs/internals/consensus.mdx +++ b/website/pages/docs/architecture/consensus.mdx @@ -171,7 +171,7 @@ The three read modes are: For more documentation about using these various modes, see the [HTTP API](/api/features/consistency). -## Deployment Table +## Deployment Table ((#deployment_table)) Below is a table that shows quorum size and failure tolerance for various cluster sizes. The recommended deployment is either 3 or 5 servers. 
A single diff --git a/website/pages/docs/internals/coordinates.mdx b/website/pages/docs/architecture/coordinates.mdx similarity index 100% rename from website/pages/docs/internals/coordinates.mdx rename to website/pages/docs/architecture/coordinates.mdx diff --git a/website/pages/docs/internals/gossip.mdx b/website/pages/docs/architecture/gossip.mdx similarity index 98% rename from website/pages/docs/internals/gossip.mdx rename to website/pages/docs/architecture/gossip.mdx index 3320790ea..bfbbab47a 100644 --- a/website/pages/docs/internals/gossip.mdx +++ b/website/pages/docs/architecture/gossip.mdx @@ -41,9 +41,7 @@ is used as an embedded library to provide these features. From a user perspectiv this is not important, since the abstraction should be masked by Consul. It can be useful however as a developer to understand how this library is leveraged. - - -## Lifeguard Enhancements +## Lifeguard Enhancements ((#lifeguard)) SWIM makes the assumption that the local node is healthy in the sense that soft real-time processing of packets is possible. 
However, in cases diff --git a/website/pages/docs/internals/architecture.mdx b/website/pages/docs/architecture/index.mdx similarity index 100% rename from website/pages/docs/internals/architecture.mdx rename to website/pages/docs/architecture/index.mdx diff --git a/website/pages/docs/internals/jepsen.mdx b/website/pages/docs/architecture/jepsen.mdx similarity index 100% rename from website/pages/docs/internals/jepsen.mdx rename to website/pages/docs/architecture/jepsen.mdx diff --git a/website/pages/docs/connect/connect-internals.mdx b/website/pages/docs/connect/connect-internals.mdx index 8558e029d..ef4eb6a0b 100644 --- a/website/pages/docs/connect/connect-internals.mdx +++ b/website/pages/docs/connect/connect-internals.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - Architecture -sidebar_title: Architecture +page_title: How Connect Works +sidebar_title: How Connect Works description: >- This page details the internals of Consul Connect: mutual TLS, agent caching and performance, intention and certificate authority replication. diff --git a/website/pages/docs/connect/gateways/ingress-gateway.mdx b/website/pages/docs/connect/gateways/ingress-gateway.mdx index 4f62b2940..170a4f460 100644 --- a/website/pages/docs/connect/gateways/ingress-gateway.mdx +++ b/website/pages/docs/connect/gateways/ingress-gateway.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - Ingress Gateways -sidebar_title: Ingress Gateways +page_title: External <> Internal Services - Ingress Gateways +sidebar_title: External <> Internal Services - Ingress Gateways description: >- An ingress gateway enables ingress traffic from services outside the Consul service mesh to services inside the Consul service mesh. 
This section details diff --git a/website/pages/docs/connect/gateways/mesh-gateway.mdx b/website/pages/docs/connect/gateways/mesh-gateway/index.mdx similarity index 98% rename from website/pages/docs/connect/gateways/mesh-gateway.mdx rename to website/pages/docs/connect/gateways/mesh-gateway/index.mdx index 00f245339..76ddb1ed7 100644 --- a/website/pages/docs/connect/gateways/mesh-gateway.mdx +++ b/website/pages/docs/connect/gateways/mesh-gateway/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - Mesh Gateways -sidebar_title: Mesh Gateways +page_title: Connect Datacenters - Mesh Gateways +sidebar_title: Connect Datacenters - Mesh Gateways description: >- A Mesh Gateway enables better routing of a Connect service's data to upstreams in other datacenters. This section details how to use Envoy and describes how diff --git a/website/pages/docs/connect/gateways/wan-federation-via-mesh-gateways.mdx b/website/pages/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx similarity index 98% rename from website/pages/docs/connect/gateways/wan-federation-via-mesh-gateways.mdx rename to website/pages/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx index 3efcc4b5c..e8d726ae6 100644 --- a/website/pages/docs/connect/gateways/wan-federation-via-mesh-gateways.mdx +++ b/website/pages/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - WAN Federation via Mesh Gateways -sidebar_title: WAN Federation via Mesh Gateways +page_title: WAN Federation via Mesh Gateways +sidebar_title: WAN Federation description: |- WAN federation via mesh gateways allows for Consul servers in different datacenters to be federated exclusively through mesh gateways. --- @@ -176,5 +176,5 @@ expected result: their _local_ ip addresses and are listed as `alive`. - Ensure any API request that activates datacenter request forwarding. 
such as - [`/v1/catalog/services?dc=`](/api/catalog.html#dc-1) + [`/v1/catalog/services?dc=`](/api/catalog#dc-1) succeeds. diff --git a/website/pages/docs/connect/gateways/terminating-gateway.mdx b/website/pages/docs/connect/gateways/terminating-gateway.mdx index 024d362be..889dd1db9 100644 --- a/website/pages/docs/connect/gateways/terminating-gateway.mdx +++ b/website/pages/docs/connect/gateways/terminating-gateway.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - Terminating Gateways -sidebar_title: Terminating Gateways +page_title: Internal <> External Services - Terminating Gateways +sidebar_title: Internal <> External Services - Terminating Gateways description: >- A terminating gateway enables traffic from services in the Consul service mesh to services outside the mesh. This section details @@ -119,7 +119,7 @@ by sending the registration request to a client or server agent on a different h All services registered in the Consul catalog must be associated with a node, even when their node is not managed by a Consul client agent. All agent-less services with the same address can be registered under the same node name and address. -However, ensure that the [node name](/api/catalog.html#node) for external services registered directly in the catalog +However, ensure that the [node name](/api/catalog#node) for external services registered directly in the catalog does not match the node name of any Consul client agent node. If the node name overlaps with the node name of a Consul client agent, Consul's [anti-entropy sync](/docs/internals/anti-entropy) will delete the services registered via the `/catalog/register` HTTP API endpoint. 
diff --git a/website/pages/docs/connect/index.mdx b/website/pages/docs/connect/index.mdx index b9d19697b..b8abf9f3c 100644 --- a/website/pages/docs/connect/index.mdx +++ b/website/pages/docs/connect/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect (Service Segmentation) -sidebar_title: Connect - Service Mesh +page_title: Service Mesh +sidebar_title: Service Mesh description: |- Consul Connect provides service-to-service connection authorization and encryption using mutual TLS. diff --git a/website/pages/docs/connect/intentions.mdx b/website/pages/docs/connect/intentions.mdx index 57befa2e0..a2c46cf26 100644 --- a/website/pages/docs/connect/intentions.mdx +++ b/website/pages/docs/connect/intentions.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Connect - Intentions -sidebar_title: Intentions - Security Policies +page_title: Service-to-service permissions - Intentions +sidebar_title: Service-to-service permissions - Intentions description: >- Intentions define access control for services via Connect and are used to control which services may establish connections. Intentions can be managed diff --git a/website/pages/docs/internals/discovery-chain.mdx b/website/pages/docs/connect/l7-traffic/discovery-chain.mdx similarity index 100% rename from website/pages/docs/internals/discovery-chain.mdx rename to website/pages/docs/connect/l7-traffic/discovery-chain.mdx diff --git a/website/pages/docs/connect/l7-traffic-management.mdx b/website/pages/docs/connect/l7-traffic/index.mdx similarity index 100% rename from website/pages/docs/connect/l7-traffic-management.mdx rename to website/pages/docs/connect/l7-traffic/index.mdx diff --git a/website/pages/docs/connect/proxies/built-in.mdx b/website/pages/docs/connect/proxies/built-in.mdx index 9f52ee209..f48b3e8cf 100644 --- a/website/pages/docs/connect/proxies/built-in.mdx +++ b/website/pages/docs/connect/proxies/built-in.mdx @@ -58,7 +58,7 @@ All fields are optional with a sane default. 
- `bind_port` - The port the proxy will bind it's _public_ mTLS listener to. If not provided, the agent will attempt to assign one from its - [configured proxy port range](/docs/agent/options#proxy_min_port) if available. + [configured proxy port range](/docs/agent/options#sidecar_min_port) if available. By default the range is [20000, 20255] and the port is selected at random from that range. diff --git a/website/pages/docs/connect/proxies/managed-deprecated.mdx b/website/pages/docs/connect/proxies/managed-deprecated.mdx index 7f3e3ecfd..c5d704ae3 100644 --- a/website/pages/docs/connect/proxies/managed-deprecated.mdx +++ b/website/pages/docs/connect/proxies/managed-deprecated.mdx @@ -79,7 +79,7 @@ via agent configuration files. They _cannot_ be registered via the HTTP API. And 2.) Managed proxies are not started at all if Consul is running as root. Both of these default configurations help prevent arbitrary process execution or privilege escalation. This behavior can be configured -[per-agent](/docs/agent/options#connect_proxy). +[per-agent](/docs/agent/options). ### Lifecycle @@ -172,7 +172,7 @@ passed to the proxy instance. For full details of the additional configurable options available when using the built-in proxy see the [built-in proxy configuration -reference](/docs/connect/configuration#built-in-proxy-options). +reference](/docs/connect/configuration). ### Prepared Query Upstreams @@ -209,7 +209,7 @@ service. For full details of the additional configurable options available when using the built-in proxy see the [built-in proxy configuration -reference](/docs/connect/configuration#built-in-proxy-options). +reference](/docs/connect/configuration). ### Custom Managed Proxy @@ -260,7 +260,7 @@ proxy command will use `my-proxy` instead of the default built-in proxy. 
The `config` key is an optional opaque JSON object which will be passed through to the proxy via the proxy configuration endpoint to allow any configuration options the proxy needs to be specified. See the [built-in proxy -configuration reference](/docs/connect/configuration#built-in-proxy-options) +configuration reference](/docs/connect/configuration) for details of config options that can be passed when using the built-in proxy. ### Managed Proxy Logs diff --git a/website/pages/docs/connect/security.mdx b/website/pages/docs/connect/security.mdx index 8ad4301b1..a92d8b060 100644 --- a/website/pages/docs/connect/security.mdx +++ b/website/pages/docs/connect/security.mdx @@ -14,7 +14,7 @@ Connect enables secure service-to-service communication over mutual TLS. This provides both in-transit data encryption as well as authorization. This page will document how to secure Connect. To try Connect locally, complete the [Getting Started guide](https://learn.hashicorp.com/tutorials/consul/service-mesh?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) or for a full security model reference, -see the dedicated [Consul security model](/docs/internals/security.html) page. When +see the dedicated [Consul security model](/docs/internals/security) page. When setting up Connect in production, review this [tutorial](https://learn.hashicorp.com/tutorials/consul/service-mesh-production-checklist?utm_source=consul.io&utm_medium=docs). Connect will function in any Consul configuration. 
However, unless the checklist diff --git a/website/pages/docs/agent/checks.mdx b/website/pages/docs/discovery/checks.mdx similarity index 99% rename from website/pages/docs/agent/checks.mdx rename to website/pages/docs/discovery/checks.mdx index 6b84fbbe6..6e114b95f 100644 --- a/website/pages/docs/agent/checks.mdx +++ b/website/pages/docs/discovery/checks.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Check Definition -sidebar_title: Check Definitions +page_title: Monitor Services - Check Definitions +sidebar_title: Monitor Services - Check Definitions description: >- One of the primary roles of the agent is management of system- and application-level health checks. A health check is considered to be diff --git a/website/pages/docs/agent/dns.mdx b/website/pages/docs/discovery/dns.mdx similarity index 99% rename from website/pages/docs/agent/dns.mdx rename to website/pages/docs/discovery/dns.mdx index 032982077..08759d78f 100644 --- a/website/pages/docs/agent/dns.mdx +++ b/website/pages/docs/discovery/dns.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: DNS Interface -sidebar_title: DNS Interface +page_title: Find Services - DNS Interface +sidebar_title: Find Services - DNS Interface description: >- One of the primary query interfaces for Consul is DNS. 
The DNS interface allows applications to make use of service discovery without any high-touch diff --git a/website/pages/docs/agent/services.mdx b/website/pages/docs/discovery/services.mdx similarity index 99% rename from website/pages/docs/agent/services.mdx rename to website/pages/docs/discovery/services.mdx index f533d2449..5622012c2 100644 --- a/website/pages/docs/agent/services.mdx +++ b/website/pages/docs/discovery/services.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Service Definition -sidebar_title: Service Definitions +page_title: Register Services - Service Definitions +sidebar_title: Register Services - Service Definitions description: >- One of the main goals of service discovery is to provide a catalog of available services. To that end, the agent provides a simple service diff --git a/website/pages/downloads_tools/index.mdx b/website/pages/docs/download-tools.mdx similarity index 98% rename from website/pages/downloads_tools/index.mdx rename to website/pages/docs/download-tools.mdx index 2188032e3..cf3785972 100644 --- a/website/pages/downloads_tools/index.mdx +++ b/website/pages/docs/download-tools.mdx @@ -1,6 +1,7 @@ --- -layout: index -page_title: Download Consul Tools +layout: docs +page_title: Consul Tools +sidebar_title: Consul Tools description: |- From this page you can download various tools for Consul. These tools are maintained by HashiCorp and the Consul Community. @@ -17,7 +18,7 @@ These Consul tools are created and managed by the dedicated engineers at HashiCo - [Envconsul](https://github.com/hashicorp/envconsul) - Read and set environmental variables for processes from Consul. - [Consul Migrate](https://github.com/hashicorp/consul-migrate) - Data migration tool to handle Consul upgrades to 0.5.1+ - [Consul Replicate](https://github.com/hashicorp/consul-replicate) - Consul cross-DC KV replication daemon. -- [Consul Template](https://github.com/hashicorp/consul-template) - Generic template rendering and notifications with Consul. 
A step-by-step tutorial is available on [HashiCorp Learn](https://learn.hashicorp.com/tutorials/consul/consul-template). +- [Consul Template](https://github.com/hashicorp/consul-template) - Generic template rendering and notifications with Consul. A step by step tutorial is available on [HashiCorp Learn](https://learn.hashicorp.com/tutorials/consul/consul-template). ## Community Tools diff --git a/website/pages/docs/agent/kv.mdx b/website/pages/docs/dynamic-app-config/kv.mdx similarity index 100% rename from website/pages/docs/agent/kv.mdx rename to website/pages/docs/dynamic-app-config/kv.mdx diff --git a/website/pages/docs/internals/sessions.mdx b/website/pages/docs/dynamic-app-config/sessions.mdx similarity index 100% rename from website/pages/docs/internals/sessions.mdx rename to website/pages/docs/dynamic-app-config/sessions.mdx diff --git a/website/pages/docs/agent/watches.mdx b/website/pages/docs/dynamic-app-config/watches.mdx similarity index 100% rename from website/pages/docs/agent/watches.mdx rename to website/pages/docs/dynamic-app-config/watches.mdx diff --git a/website/pages/docs/enterprise/index.mdx b/website/pages/docs/enterprise/index.mdx index e8203631a..37518712a 100644 --- a/website/pages/docs/enterprise/index.mdx +++ b/website/pages/docs/enterprise/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Consul Enterprise -sidebar_title: Consul Enterprise +page_title: Enterprise Features +sidebar_title: Enterprise Features description: >- Consul Enterprise features a number of capabilities beyond the open source offering that may be beneficial in certain workflows. diff --git a/website/pages/docs/guides/acl-legacy.mdx b/website/pages/docs/guides/acl-legacy.mdx index 1761b5d26..575a4b782 100644 --- a/website/pages/docs/guides/acl-legacy.mdx +++ b/website/pages/docs/guides/acl-legacy.mdx @@ -1069,9 +1069,7 @@ name that starts with "admin". 
## Advanced Topics - - -#### Outages and ACL Replication +#### Outages and ACL Replication ((#replication)) The Consul ACL system is designed with flexible rules to accommodate for an outage of the [`primary_datacenter`](/docs/agent/options#primary_datacenter) or networking @@ -1133,9 +1131,7 @@ using a process like this: 4. Rolling restart the agents in other datacenters and change their `primary_datacenter` configuration to the target datacenter. - - -#### Complete ACL Coverage in Consul 0.8 +#### Complete ACL Coverage in Consul 0.8 ((#version_8_acls)) Consul 0.8 added many more ACL policy types and brought ACL enforcement to Consul agents for the first time. To ease the transition to Consul 0.8 for existing ACL diff --git a/website/pages/docs/guides/consul-f5.mdx b/website/pages/docs/guides/consul-f5.mdx index e8b579bbd..c309c3dbe 100644 --- a/website/pages/docs/guides/consul-f5.mdx +++ b/website/pages/docs/guides/consul-f5.mdx @@ -229,7 +229,7 @@ The above declaration does the following: - A pool named web_pool monitored by the http health monitor. - NGINX Pool members autodiscovered via Consul's [catalog HTTP API - endpoint](https://www.consul.io/api/catalog.html#list-nodes-for-service). + endpoint](/api-docs/catalog#list-nodes-for-service). For the `virtualAddresses` make sure to substitute your BIG-IP Virtual Server. diff --git a/website/pages/docs/guides/consul-splitting.mdx b/website/pages/docs/guides/consul-splitting.mdx index b7ce94649..a9b8cbfe3 100644 --- a/website/pages/docs/guides/consul-splitting.mdx +++ b/website/pages/docs/guides/consul-splitting.mdx @@ -183,10 +183,10 @@ $ consul config write l7_config/api_service_defaults.json ``` Find more information on `service-defaults` configuration entries in the -[documentation](https://www.consul.io/docs/agent/config-entries/service-defaults.html). +[documentation](/docs/agent/config-entries/service-defaults). 
-> **Automation Tip:** To automate interactions with configuration entries, use -the HTTP API endpoint [`http://localhost:8500/v1/config`](https://www.consul.io/api/config.html). +the HTTP API endpoint [`http://localhost:8500/v1/config`](/api/config). ### Configuring the Service Resolver @@ -232,7 +232,7 @@ $ consul config write l7_config/api_service_resolver.json ``` Find more information about service resolvers in the -[documentation](https://www.consul.io/docs/agent/config-entries/service-resolver.html). +[documentation](/docs/agent/config-entries/service-resolver). ### Configure Service Splitting - 100% of traffic to Version 1 @@ -248,7 +248,7 @@ The configuration entry for service splitting has the `kind` of act on. The `splits` field takes an array which defines the different splits; in this example, there are only two splits; however, it is [possible to configure multiple sequential -splits](https://www.consul.io/docs/connect/l7-traffic-management.html#splitting). +splits](/docs/connect/l7-traffic-management#splitting). Each split has a `weight` which defines the percentage of traffic to distribute to each service subset. The total weights for all splits must equal 100. For @@ -457,4 +457,4 @@ In this guide, we walked you through the steps required to perform Canary deployments using traffic splitting and resolution. Find out more about L7 traffic management settings in the -[documentation](https://www.consul.io/docs/connect/l7-traffic-management.html). +[documentation](/docs/connect/l7-traffic-management). diff --git a/website/pages/docs/guides/kuberenetes-deployment.mdx b/website/pages/docs/guides/kuberenetes-deployment.mdx index 7ab3cef23..050302a90 100644 --- a/website/pages/docs/guides/kuberenetes-deployment.mdx +++ b/website/pages/docs/guides/kuberenetes-deployment.mdx @@ -13,7 +13,7 @@ access to the Consul UI. ~> **Security Warning** This guide is not for production use. By default, the chart will install an insecure configuration of Consul. 
Please refer to the -[Kubernetes documentation](https://www.consul.io/docs/platform/k8s) +[Kubernetes documentation](/docs/platform/k8s) to determine how you can secure Consul on Kubernetes in production. Additionally, it is highly recommended to use a properly secured Kubernetes cluster or make sure that you understand and enable the recommended security @@ -22,7 +22,7 @@ features. To complete this guide successfully, you should have an existing Kubernetes cluster, and locally configured [Helm](https://helm.sh/docs/using_helm/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). If you do not have an -existing Kubernetes cluster you can use the [Minikube with Consul guide](https://www.consul.io/docs/guides/minikube.html) to get started +existing Kubernetes cluster you can use the [Minikube with Consul guide](/docs/guides/minikube) to get started with Consul on Kubernetes. ## Deploy Consul @@ -31,7 +31,7 @@ You can deploy a complete Consul datacenter using the official Helm chart. By default, the chart will install three Consul servers and client on all Kubernetes nodes. You can review the [Helm chart -values](https://www.consul.io/docs/platform/k8s/helm.html#configuration-values) +values](/docs/platform/k8s/helm#configuration-values) to learn more about the default settings. ### Download the Helm Chart @@ -45,7 +45,7 @@ $ git clone https://github.com/hashicorp/consul-helm.git You do not need to update the Helm chart before deploying Consul, it comes with reasonable defaults. Review the [Helm chart -documentation](https://www.consul.io/docs/platform/k8s/helm.html) to learn more +documentation](/docs/platform/k8s/helm) to learn more about the chart. ### Helm Install Consul @@ -81,7 +81,7 @@ Kubernetes cluster. To access the UI you will need to update the `ui` values in the Helm chart. 
Alternatively, if you do not wish to upgrade your cluster, you can set up [port -forwarding](https://www.consul.io/docs/platform/k8s/run.html#viewing-the-consul-ui) with +forwarding](/docs/platform/k8s/run#viewing-the-consul-ui) with `kubectl`. ### Create Values File @@ -111,7 +111,7 @@ server: ``` This file renames your datacenter, enables catalog sync, sets up a load -balancer service for the UI, and enables [affinity](https://www.consul.io/docs/platform/k8s/helm.html#v-server-affinity) to allow only one +balancer service for the UI, and enables [affinity](/docs/platform/k8s/helm#v-server-affinity) to allow only one Consul pod per Kubernetes node. The catalog sync parameters will allow you to see the Kubernetes services in the Consul UI. @@ -119,7 +119,7 @@ the Kubernetes services in the Consul UI. ### Initiate Rolling Upgrade Finally, initiate the -[upgrade](https://www.consul.io/docs/platform/k8s/run.html#upgrading-consul-on-kubernetes) +[upgrade](/docs/platform/k8s/run#upgrading-consul-on-kubernetes) with `helm upgrade` and the `-f` flag that passes in your new values file. This processes should also be quick, less than a minute. @@ -140,7 +140,7 @@ mollified-robin-consul-ui LoadBalancer 122.16.31.395 36.276.67.195 ``` Additionally, you can use `kubectl get pods` to view the new catalog sync -process. The [catalog sync](https://www.consul.io/docs/platform/k8s/helm.html#v-synccatalog) process will sync +process. The [catalog sync](/docs/platform/k8s/helm#v-synccatalog) process will sync Consul and Kubernetes services bidirectionally by default. @@ -190,7 +190,7 @@ gke-tier-2-cluster-default-pool-zrr0 172.20.0.20:8301 alive client 1.4.2 You can use the Consul HTTP API by communicating to the local agent running on the Kubernetes node. 
You can read the -[documentation](https://www.consul.io/docs/platform/k8s/run.html#accessing-the-consul-http-api) +[documentation](/docs/platform/k8s/run#accessing-the-consul-http-api) if you are interested in learning more about using the Consul HTTP API with Kubernetes. ## Summary @@ -199,4 +199,4 @@ In this guide, you deployed a Consul datacenter in Kubernetes using the official Helm chart. You also configured access to the Consul UI. To learn more about deploying applications that can use Consul's service discovery and Connect, read the example in the [Minikube with Consul -guide](https://www.consul.io/docs/guides/minikube.html#step-2-deploy-custom-applications). +guide](/docs/guides/minikube#step-2-deploy-custom-applications). diff --git a/website/pages/docs/guides/kubernetes-production-deploy.mdx b/website/pages/docs/guides/kubernetes-production-deploy.mdx index b1c726aa1..e4f4cc818 100644 --- a/website/pages/docs/guides/kubernetes-production-deploy.mdx +++ b/website/pages/docs/guides/kubernetes-production-deploy.mdx @@ -51,7 +51,7 @@ can reference these secrets in the customized Helm chart values file. used with the official image, `hashicorp/consul-enterprise:1.5.0-ent`. - Enable - [encryption](https://www.consul.io/docs/agent/encryption.html#gossip-encryption) to secure gossip traffic within the Consul cluster. + [encryption](/docs/agent/encryption#gossip-encryption) to secure gossip traffic within the Consul cluster. ~> Note, depending on your environment, the previous secrets may not be necessary. @@ -87,10 +87,10 @@ parameters based on your specific environment requirements. For security, set the `bootstrapACLs` parameter to true. This will enable Kubernetes to initially setup Consul's [ACL -system](https://www.consul.io/docs/acl/acl-system.html). +system](/docs/acl/acl-system). Read the Consul Helm chart documentation to review all the [global -parameters](https://www.consul.io/docs/platform/k8s/helm.html#v-global). 
+parameters](/docs/platform/k8s/helm#v-global). ### Consul UI @@ -104,7 +104,7 @@ or other service type in Kubernetes to make it easier to access the UI. ### Consul Servers For production deployments, you will need to deploy [3 or 5 Consul -servers](https://www.consul.io/docs/internals/consensus.html#deployment-table) +servers](/docs/internals/consensus#deployment-table) for quorum and failure tolerance. For most deployments, 3 servers are adequate. In the server section set both `replicas` and `bootstrapExpect` to 3. This will @@ -126,7 +126,7 @@ license](https://www.hashicorp.com/products/consul/enterprise) you should reference the Kubernetes secret in the `enterpriseLicense` parameter. Read the Consul Helm chart documentation to review all the [server -parameters](https://www.consul.io/docs/platform/k8s/helm.html#v-server) +parameters](/docs/platform/k8s/helm#v-server) ### Consul Clients @@ -139,7 +139,7 @@ horizontal scalability. Enabling `grpc` enables the GRPC listener on port 8502 and exposes it to the host. It is required to use Consul Connect. Read the Consul Helm chart documentation to review all the [client -parameters](https://www.consul.io/docs/platform/k8s/helm.html#v-client) +parameters](/docs/platform/k8s/helm#v-client) ### Consul Connect Injection Security @@ -153,14 +153,14 @@ same token if you are only using a default service account. This setting is only necessary if you have enabled ACLs in the global section. Read more about the [Connect Inject -parameters](https://www.consul.io/docs/platform/k8s/helm.html#v-connectinject). +parameters](/docs/platform/k8s/helm#v-connectinject). ## Complete Example Your finished values file should resemble the following example. For more complete descriptions of all the available parameters see the `values.yaml` file provided with the Helm chart and the [reference -documentation](https://www.consul.io/docs/platform/k8s/helm.html). +documentation](/docs/platform/k8s/helm). 
```yaml # Configure global settings in this section. diff --git a/website/pages/docs/guides/managing-acl-policies.mdx b/website/pages/docs/guides/managing-acl-policies.mdx index 667de435f..9c3b596b1 100644 --- a/website/pages/docs/guides/managing-acl-policies.mdx +++ b/website/pages/docs/guides/managing-acl-policies.mdx @@ -19,7 +19,7 @@ We expect operators to automate the policy and token generation process in produ We provide high-level recommendations in this guide, however, we will not describe the command by command token generation process. To learn how to create tokens, read the [ACL bootstrapping guide](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production). -This guide assumes the `default_policy` of `deny` is set on all agents, in accordance to the [security model documentation](https://www.consul.io/docs/internals/security.html#secure-configuration). +This guide assumes the `default_policy` of `deny` is set on all agents, in accordance to the [security model documentation](/docs/internals/security#secure-configuration). ## Security and Usability @@ -37,7 +37,7 @@ To discover the minimum privileges required for a specific operation, we have th First, focus on the data in your environment that needs to be secured. Ensure your sensitive data has policies that are specific and limited. Since policies can be combined to create tokens, you will usually write more policies for sensitive data. Sensitive data could be a specific application or a set of values in the key-value store. -Second, reference the Consul docs, both the [rules page](https://www.consul.io/docs/acl/acl-rules.html) and [API pages](https://www.consul.io/api), often to understand the required privileges for any given operation. +Second, reference the Consul docs, both the [rules page](/docs/acl/acl-rules) and [API pages](/api), often to understand the required privileges for any given operation. The rules documentation explains the 11 rule resources. 
The following four resource types are critical for any operating datacenter with ACLs enabled. @@ -148,4 +148,4 @@ The Operator example above illustrates creating policies on the security spectru ## Next Steps -After setting up access control processes, you will need to implement a token rotation policy. If you are using third-party tool to generate tokens, such as Vault, Consul ACL tokens will adhere to the TTLs set in that third party tool. If you are manually rotating tokens or need to revoke access, you can delete a token at any time with the [API](https://www.consul.io/api/acl/tokens.html#delete-a-token). +After setting up access control processes, you will need to implement a token rotation policy. If you are using third-party tool to generate tokens, such as Vault, Consul ACL tokens will adhere to the TTLs set in that third party tool. If you are manually rotating tokens or need to revoke access, you can delete a token at any time with the [API](/api/acl/tokens#delete-a-token). diff --git a/website/pages/docs/guides/servers.mdx b/website/pages/docs/guides/servers.mdx index db094394d..e4d700e1a 100644 --- a/website/pages/docs/guides/servers.mdx +++ b/website/pages/docs/guides/servers.mdx @@ -51,7 +51,7 @@ option to add additional servers. ## Add a Server with Agent Configuration -In production environments, you should use the [agent configuration](https://www.consul.io/docs/agent/options.html) option, `retry_join`. `retry_join` can be used as a command line flag or in the agent configuration file. +In production environments, you should use the [agent configuration](/docs/agent/options) option, `retry_join`. `retry_join` can be used as a command line flag or in the agent configuration file. 
With the Consul CLI: @@ -70,7 +70,7 @@ In the agent configuration file: } ``` -[`retry_join`](https://www.consul.io/docs/agent/options.html#retry-join) +[`retry_join`](/docs/agent/options#retry-join) will ensure that if any server loses connection with the cluster for any reason, including the node restarting, it can rejoin when it comes back. In additon to working with static IPs, it diff --git a/website/pages/docs/agent/cloud-auto-join.mdx b/website/pages/docs/install/cloud-auto-join.mdx similarity index 99% rename from website/pages/docs/agent/cloud-auto-join.mdx rename to website/pages/docs/install/cloud-auto-join.mdx index dc787a314..01f5e8ae8 100644 --- a/website/pages/docs/agent/cloud-auto-join.mdx +++ b/website/pages/docs/install/cloud-auto-join.mdx @@ -181,10 +181,7 @@ $ consul agent -retry-join "provider=softlayer datacenter=... tag_value=... user ``` - `provider` (required) - the name of the provider ("softlayer" in this case). -- - - datacenter - (required) - the name of the datacenter to auto-join in. +- `datacenter` ((#sl_datacenter)) (required) - the name of the datacenter to auto-join in. - `tag_value` (required) - the value of the tag to auto-join on. - `username` (required) - the username to use for auth. - `api_key` (required) - the api key to use for auth. 
diff --git a/website/pages/docs/glossary.mdx b/website/pages/docs/install/glossary.mdx similarity index 99% rename from website/pages/docs/glossary.mdx rename to website/pages/docs/install/glossary.mdx index e5137a4b3..6225bf05d 100644 --- a/website/pages/docs/glossary.mdx +++ b/website/pages/docs/install/glossary.mdx @@ -1,6 +1,6 @@ --- layout: docs -page_title: Consul Glossary +page_title: Glossary sidebar_title: Glossary description: >- This page collects brief definitions of some of the technical terms used in diff --git a/website/pages/docs/install/index.mdx b/website/pages/docs/install/index.mdx index 6fac20921..3846fe030 100644 --- a/website/pages/docs/install/index.mdx +++ b/website/pages/docs/install/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Install Consul -sidebar_title: Installing Consul +page_title: Get Started +sidebar_title: Get Started description: |- Installing Consul is simple. You can download a precompiled binary, compile from source or run on Kubernetes. This page details these methods. diff --git a/website/pages/docs/install/performance.mdx b/website/pages/docs/install/performance.mdx index b05ed3937..3798b0704 100644 --- a/website/pages/docs/install/performance.mdx +++ b/website/pages/docs/install/performance.mdx @@ -17,9 +17,7 @@ are generally I/O bound for writes because the underlying Raft log store perform to disk every time an entry is appended. Servers are generally CPU bound for reads since reads work from a fully in-memory data store that is optimized for concurrent access. 
- - -## Minimum Server Requirements +## Minimum Server Requirements ((#minimum)) In Consul 0.7, the default server [performance parameters](/docs/agent/options#performance) were tuned to allow Consul to run reliably (but relatively slowly) on a server cluster of three @@ -43,9 +41,7 @@ The default performance configuration is equivalent to this: } ``` - - -## Production Server Requirements +## Production Server Requirements ((#production)) When running Consul 0.7 and later in production, it is recommended to configure the server [performance parameters](/docs/agent/options#performance) back to Consul's original @@ -110,7 +106,7 @@ Here are some general recommendations: and CPU until leader elections stabilize, and in Consul 0.7 or later the [performance parameters](/docs/agent/options#performance) configuration now gives you tools to trade off performance instead of upsizing servers. You can use the [`consul.raft.leader.lastContact` - telemetry](/docs/agent/telemetry#last-contact) to observe how the Raft timing is + telemetry](/docs/agent/telemetry#leadership-changes) to observe how the Raft timing is performing and guide the decision to de-tune Raft performance or add more powerful servers. @@ -157,7 +153,9 @@ Consul is write limited by disk I/O and read limited by CPU. Memory requirements For **write-heavy** workloads, the total RAM available for overhead must approximately be equal to - RAM NEEDED = number of keys * average key size * 2-3x +``` +RAM NEEDED = number of keys * average key size * 2-3x +``` Since writes must be synced to disk (persistent storage) on a quorum of servers before they are committed, deploying a disk with high write throughput (or an SSD) will enhance performance on the write side. 
([Documentation](/docs/agent/options#_data_dir)) diff --git a/website/pages/docs/internals/acl.mdx b/website/pages/docs/internals/acl.mdx index 59b9269fd..9f85926dd 100644 --- a/website/pages/docs/internals/acl.mdx +++ b/website/pages/docs/internals/acl.mdx @@ -8,11 +8,9 @@ description: >- them. It is very similar to AWS IAM in many ways. --- -# ACL System +# ACL System ((#version_8_acls)) This content has been moved into the [ACL Guide](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production). - - See [Complete ACL Coverage in Consul 0.8](/docs/acl/acl-legacy) for details about ACL changes in Consul 0.8 and later. diff --git a/website/pages/intro/index.mdx b/website/pages/docs/intro/index.mdx similarity index 97% rename from website/pages/intro/index.mdx rename to website/pages/docs/intro/index.mdx index c4b7df2b4..a0a352ff1 100644 --- a/website/pages/intro/index.mdx +++ b/website/pages/docs/intro/index.mdx @@ -1,7 +1,7 @@ --- -layout: intro -page_title: Introduction -sidebar_title: What is Consul? +layout: docs +page_title: Intro to Consul +sidebar_title: Intro to Consul description: >- Welcome to the intro guide to Consul! This guide is the best place to start with Consul. We cover what Consul is, what problems it can solve, how it @@ -106,5 +106,5 @@ forward the request to the remote datacenter and return the result. - See [how Consul compares to other software](/intro/vs) to assess how it fits into your existing infrastructure. -- Continue onwards with the [getting started guide](https://learn.hashicorp.com/tutorials/consul/get-started-install) +- Continue onwards with the [getting started guide](https://learn.hashicorp.com/tutorials/consul/get-started-install) to get Consul up and running. 
diff --git a/website/pages/intro/vs/chef-puppet.mdx b/website/pages/docs/intro/vs/chef-puppet.mdx similarity index 99% rename from website/pages/intro/vs/chef-puppet.mdx rename to website/pages/docs/intro/vs/chef-puppet.mdx index 5d0e0ac57..9382f28d0 100644 --- a/website/pages/intro/vs/chef-puppet.mdx +++ b/website/pages/docs/intro/vs/chef-puppet.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: 'Consul vs. Chef, Puppet, etc.' sidebar_title: 'Chef, Puppet, etc.' description: >- diff --git a/website/pages/intro/vs/custom.mdx b/website/pages/docs/intro/vs/custom.mdx similarity index 99% rename from website/pages/intro/vs/custom.mdx rename to website/pages/docs/intro/vs/custom.mdx index ad3205f8d..fe5b4921b 100644 --- a/website/pages/intro/vs/custom.mdx +++ b/website/pages/docs/intro/vs/custom.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. Custom Solutions sidebar_title: Custom Solutions description: >- diff --git a/website/pages/intro/vs/eureka.mdx b/website/pages/docs/intro/vs/eureka.mdx similarity index 99% rename from website/pages/intro/vs/eureka.mdx rename to website/pages/docs/intro/vs/eureka.mdx index 43dc65097..0e1a900cf 100644 --- a/website/pages/intro/vs/eureka.mdx +++ b/website/pages/docs/intro/vs/eureka.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. Eureka sidebar_title: Eureka description: >- diff --git a/website/pages/intro/vs/index.mdx b/website/pages/docs/intro/vs/index.mdx similarity index 98% rename from website/pages/intro/vs/index.mdx rename to website/pages/docs/intro/vs/index.mdx index d3cb7e39b..fd4fa4dc6 100644 --- a/website/pages/intro/vs/index.mdx +++ b/website/pages/docs/intro/vs/index.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. Other Software sidebar_title: Consul vs. 
Other Software description: >- diff --git a/website/pages/intro/vs/istio.mdx b/website/pages/docs/intro/vs/istio.mdx similarity index 99% rename from website/pages/intro/vs/istio.mdx rename to website/pages/docs/intro/vs/istio.mdx index cbc3b5741..bdbb215dc 100644 --- a/website/pages/intro/vs/istio.mdx +++ b/website/pages/docs/intro/vs/istio.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. Istio sidebar_title: Istio description: >- diff --git a/website/pages/intro/vs/nagios-sensu.mdx b/website/pages/docs/intro/vs/nagios-sensu.mdx similarity index 99% rename from website/pages/intro/vs/nagios-sensu.mdx rename to website/pages/docs/intro/vs/nagios-sensu.mdx index 4fbebb7cf..55eb3ea0d 100644 --- a/website/pages/intro/vs/nagios-sensu.mdx +++ b/website/pages/docs/intro/vs/nagios-sensu.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: 'Consul vs. Nagios, Sensu' sidebar_title: 'Nagios, Sensu' description: >- diff --git a/website/pages/intro/vs/proxies.mdx b/website/pages/docs/intro/vs/proxies.mdx similarity index 99% rename from website/pages/intro/vs/proxies.mdx rename to website/pages/docs/intro/vs/proxies.mdx index c79b04f9f..cee0c4bde 100644 --- a/website/pages/intro/vs/proxies.mdx +++ b/website/pages/docs/intro/vs/proxies.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. Envoy and Other Proxies sidebar_title: Envoy and Other Proxies description: >- diff --git a/website/pages/intro/vs/serf.mdx b/website/pages/docs/intro/vs/serf.mdx similarity index 99% rename from website/pages/intro/vs/serf.mdx rename to website/pages/docs/intro/vs/serf.mdx index 995bbcb85..8b81904a0 100644 --- a/website/pages/intro/vs/serf.mdx +++ b/website/pages/docs/intro/vs/serf.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. 
Serf sidebar_title: Serf description: >- diff --git a/website/pages/intro/vs/skydns.mdx b/website/pages/docs/intro/vs/skydns.mdx similarity index 99% rename from website/pages/intro/vs/skydns.mdx rename to website/pages/docs/intro/vs/skydns.mdx index 909e4a522..c645014a1 100644 --- a/website/pages/intro/vs/skydns.mdx +++ b/website/pages/docs/intro/vs/skydns.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. SkyDNS sidebar_title: SkyDNS description: >- diff --git a/website/pages/intro/vs/smartstack.mdx b/website/pages/docs/intro/vs/smartstack.mdx similarity index 99% rename from website/pages/intro/vs/smartstack.mdx rename to website/pages/docs/intro/vs/smartstack.mdx index 064d04fb6..816a638a0 100644 --- a/website/pages/intro/vs/smartstack.mdx +++ b/website/pages/docs/intro/vs/smartstack.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: Consul vs. SmartStack sidebar_title: SmartStack description: >- diff --git a/website/pages/intro/vs/zookeeper.mdx b/website/pages/docs/intro/vs/zookeeper.mdx similarity index 99% rename from website/pages/intro/vs/zookeeper.mdx rename to website/pages/docs/intro/vs/zookeeper.mdx index b4028a3fd..4cdd68a21 100644 --- a/website/pages/intro/vs/zookeeper.mdx +++ b/website/pages/docs/intro/vs/zookeeper.mdx @@ -1,5 +1,5 @@ --- -layout: intro +layout: docs page_title: 'Consul vs. ZooKeeper, doozerd, etcd' sidebar_title: 'ZooKeeper, doozerd, etcd' description: >- diff --git a/website/pages/docs/k8s/connect/ingress-gateways.mdx b/website/pages/docs/k8s/connect/ingress-gateways.mdx index 148c2af38..a310d203f 100644 --- a/website/pages/docs/k8s/connect/ingress-gateways.mdx +++ b/website/pages/docs/k8s/connect/ingress-gateways.mdx @@ -78,7 +78,7 @@ $ kubectl port-forward consul-server-0 8500 & If TLS is enabled use port 8501. --> Download the latest Consul binary from [Downloads](/downloads.html). +-> Download the latest Consul binary from [Downloads](/downloads). 
[https://releases.hashicorp.com/consul/](https://releases.hashicorp.com/consul/) If TLS is enabled set: diff --git a/website/pages/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx b/website/pages/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx index 1bc649452..61c575a07 100644 --- a/website/pages/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx +++ b/website/pages/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx @@ -77,7 +77,7 @@ externalServers: In most cases, `externalServers.hosts` will be the same as `client.join`, however, both keys must be set because they are used for different purposes: one for Serf LAN and the other for HTTPS connections. -Please see the [reference documentation](https://www.consul.io/docs/k8s/helm.html#v-externalservers-hosts) +Please see the [reference documentation](/docs/k8s/helm#v-externalservers-hosts) for more info. If your HTTPS port is different from Consul's default `8501`, you must also set `externalServers.httpsPort`. @@ -88,7 +88,7 @@ to help initialize ACL tokens for Consul clients and consul-k8s components for y ### Manually Bootstrapping ACLs -If you would like to call the [ACL bootstrapping API](/api/acl/acl.html#bootstrap-acls) yourself or if your cluster has already been bootstrapped with ACLs, +If you would like to call the [ACL bootstrapping API](/api/acl/acl#bootstrap-acls) yourself or if your cluster has already been bootstrapped with ACLs, you can provide the bootstrap token to the Helm chart. The Helm chart will then use this token to configure ACLs for Consul clients and any consul-k8s components you are enabling. @@ -118,7 +118,7 @@ The bootstrap token requires the following minimal permissions: Next, configure external servers. The Helm chart will use this configuration to talk to the Consul server's API to create policies, tokens, and an auth method. 
If you are [enabling Consul Connect](/docs/k8s/connect), `k8sAuthMethodHost` should be set to the address of your Kubernetes API server -so that the Consul servers can validate a Kubernetes service account token when using the [Kubernetes auth method](https://www.consul.io/docs/acl/auth-methods/kubernetes.html) +so that the Consul servers can validate a Kubernetes service account token when using the [Kubernetes auth method](/docs/acl/auth-methods/kubernetes) with `consul login`. ```yaml diff --git a/website/pages/docs/k8s/helm.mdx b/website/pages/docs/k8s/installation/helm.mdx similarity index 99% rename from website/pages/docs/k8s/helm.mdx rename to website/pages/docs/k8s/installation/helm.mdx index e181a1842..13879b8ee 100644 --- a/website/pages/docs/k8s/helm.mdx +++ b/website/pages/docs/k8s/installation/helm.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Helm Chart Reference - Kubernetes -sidebar_title: Helm Chart Reference +page_title: Install with Helm Chart +sidebar_title: Install with Helm Chart description: Reference for the Consul Helm chart. --- @@ -392,7 +392,7 @@ and consider if they're appropriate for your deployment. - `k8sAuthMethodHost` ((#v-externalservers-k8sauthmethodhost)) (`string: null`) - If you are setting `global.acls.manageSystemACLs` and `connectInject.enabled` to true, set `k8sAuthMethodHost` to the address of the Kubernetes API server. This address must be reachable from the Consul servers. - Please see the [Kubernetes Auth Method documentation](https://www.consul.io/docs/acl/auth-methods/kubernetes.html). Requires consul-k8s >= 0.14.0. + Please see the [Kubernetes Auth Method documentation](/docs/acl/auth-methods/kubernetes). Requires consul-k8s >= 0.14.0. You could retrieve this value from your `kubeconfig` by running: @@ -679,7 +679,7 @@ and consider if they're appropriate for your deployment. - `imageConsul` ((#v-connectinject-imageConsul)) (`string: global.image`) - The name of the Docker image (including any tag) for Consul. 
This is used for proxy service registration, Envoy configuration, etc. - - `imageEnvoy` ((#v-connectinject-imageEnvoy)) (`string: ""`) - The name of the Docker image (including any tag) for the Envoy sidecar. `envoy` must be on the executable path within this image. This Envoy version must be compatible with the Consul version used by the injector. If not specified this defaults to letting the injector choose the Envoy image. Check [supported Envoy versions](/docs/connect/proxies/envoy.html#supported-versions) to ensure the version you are using is compatible with Consul. + - `imageEnvoy` ((#v-connectinject-imageEnvoy)) (`string: ""`) - The name of the Docker image (including any tag) for the Envoy sidecar. `envoy` must be on the executable path within this image. This Envoy version must be compatible with the Consul version used by the injector. If not specified this defaults to letting the injector choose the Envoy image. Check [supported Envoy versions](/docs/connect/proxies/envoy#supported-versions) to ensure the version you are using is compatible with Consul. - `namespaceSelector` ((#v-connectinject-namespaceselector)) (`string: ""`) - A [selector](https:// kubernetes.io/docs/concepts/overview/working-with-objects/labels/) @@ -828,13 +828,13 @@ and consider if they're appropriate for your deployment. - `enabled` ((#v-meshgateway-enabled)) (`boolean: true`) - If mesh gateways are enabled, a Deployment will be created that runs gateways and Consul Connect will be configured to use gateways. - See [mesh gateway docs](https://www.consul.io/docs/connect/mesh_gateway.html). + See [mesh gateway docs](/docs/connect/mesh_gateway). Requirements: Consul 1.6.0+ and consul-k8s 0.15.0+ if using `global.acls.manageSystemACLs`. - `globalMode` ((#v-meshgateway-globalmode)) (`string: "local"`) - Globally configure which mode the gateway should run in. Can be set to either `"remote"`, `"local"`, `"none"` or `""` or `null`. 
- See [mesh gateway modes of operation](https://consul.io/docs/connect/mesh_gateway.html#modes-of-operation) for + See [mesh gateway modes of operation](/docs/connect/mesh_gateway#modes-of-operation) for a description of each mode. If set to anything other than `""` or `null`, `connectInject.centralConfig.enabled` should be set to true so that the global config will actually be used. diff --git a/website/pages/docs/k8s/installation/index.mdx b/website/pages/docs/k8s/installation/index.mdx index 4b6388195..73c49c9af 100644 --- a/website/pages/docs/k8s/installation/index.mdx +++ b/website/pages/docs/k8s/installation/index.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Installing Consul on Kubernetes - Kubernetes -sidebar_title: Installation +sidebar_title: Get Started description: >- Consul can run directly on Kubernetes, both in server or client mode. For pure-Kubernetes workloads, this enables Consul to also exist purely within diff --git a/website/pages/docs/k8s/installation/multi-cluster/kubernetes.mdx b/website/pages/docs/k8s/installation/multi-cluster/kubernetes.mdx index 1d4e72acc..713570970 100644 --- a/website/pages/docs/k8s/installation/multi-cluster/kubernetes.mdx +++ b/website/pages/docs/k8s/installation/multi-cluster/kubernetes.mdx @@ -220,8 +220,8 @@ The automatically generated federation secret contains: - **Consul server config** - This is a JSON snippet that must be used as part of the server config for secondary datacenters. It sets: - - [`primary_datacenter`](/docs/agent/options.html#primary_datacenter) to the name of the primary datacenter. - - [`primary_gateways`](/docs/agent/options.html#primary_gateways) to an array of IPs or hostnames + - [`primary_datacenter`](/docs/agent/options#primary_datacenter) to the name of the primary datacenter. + - [`primary_gateways`](/docs/agent/options#primary_gateways) to an array of IPs or hostnames for the mesh gateways in the primary datacenter. 
These are the addresses that Consul servers in secondary clusters will use to communicate with the primary datacenter. @@ -244,7 +244,7 @@ The automatically generated federation secret contains: ## Secondary Cluster(s) -With the primary cluster up and running, and the [federation secret](/docs/k8s/installation/multi-cluster#federation-secret) imported +With the primary cluster up and running, and the [federation secret](#federation-secret) imported into the secondary cluster, we can now install Consul into the secondary cluster. diff --git a/website/pages/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx b/website/pages/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx index b38029ad5..775460607 100644 --- a/website/pages/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx +++ b/website/pages/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx @@ -14,7 +14,7 @@ description: >- Consul datacenters running on non-kubernetes platforms like VMs or bare metal can be federated with Kubernetes datacenters. Just like with Kubernetes, one datacenter -must be the [primary](/docs/k8s/installation/multi-cluster#primary-datacenter). +must be the [primary](/docs/k8s/installation/multi-cluster/kubernetes#primary-datacenter). ## Kubernetes as the Primary @@ -78,7 +78,7 @@ $ consul tls cert create -client ==> Saved dc1-client-consul-0-key.pem ``` -Or use the [auto_encrypt](/docs/agent/options.html#auto_encrypt) feature. +Or use the [auto_encrypt](/docs/agent/options#auto_encrypt) feature. 1. The WAN addresses of the mesh gateways: @@ -175,7 +175,7 @@ ports { ## Kubernetes as the Secondary If you're running your primary datacenter on VMs then you'll need to manually -construct the [Federation Secret](#federation-secret) in order to federate +construct the [Federation Secret](/docs/k8s/installation/multi-cluster/kubernetes#federation-secret) in order to federate Kubernetes clusters as secondaries. 
-> Your VM cluster must be running mesh gateways, and have mesh gateway WAN diff --git a/website/pages/docs/k8s/operations/tls-on-existing-cluster.mdx b/website/pages/docs/k8s/tls-on-existing-cluster.mdx similarity index 96% rename from website/pages/docs/k8s/operations/tls-on-existing-cluster.mdx rename to website/pages/docs/k8s/tls-on-existing-cluster.mdx index a7cda254d..3310fc852 100644 --- a/website/pages/docs/k8s/operations/tls-on-existing-cluster.mdx +++ b/website/pages/docs/k8s/tls-on-existing-cluster.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Configuring TLS on an Existing Cluster -sidebar_title: Configuring TLS on an Existing Cluster -description: Configuring TLS on an existing Consul cluster running in Kubernetes +page_title: Configure TLS on an Existing Cluster +sidebar_title: Configure TLS on an Existing Cluster +description: Configure TLS on an existing Consul cluster running in Kubernetes --- # Configuring TLS on an Existing Cluster diff --git a/website/pages/docs/k8s/operations/uninstalling.mdx b/website/pages/docs/k8s/uninstall.mdx similarity index 91% rename from website/pages/docs/k8s/operations/uninstalling.mdx rename to website/pages/docs/k8s/uninstall.mdx index aa6ca5c1b..a61527d44 100644 --- a/website/pages/docs/k8s/operations/uninstalling.mdx +++ b/website/pages/docs/k8s/uninstall.mdx @@ -1,11 +1,11 @@ --- layout: docs -page_title: Uninstalling -sidebar_title: Uninstalling -description: Uninstalling Consul on Kubernetes +page_title: Uninstall +sidebar_title: Uninstall +description: Uninstall Consul on Kubernetes --- -# Uninstalling Consul +# Uninstall Consul Consul can be uninstalled via the `helm delete` command: diff --git a/website/pages/docs/k8s/operations/upgrading.mdx b/website/pages/docs/k8s/upgrade.mdx similarity index 95% rename from website/pages/docs/k8s/operations/upgrading.mdx rename to website/pages/docs/k8s/upgrade.mdx index 24684a655..40d2d7e16 100644 --- a/website/pages/docs/k8s/operations/upgrading.mdx +++ 
b/website/pages/docs/k8s/upgrade.mdx @@ -1,11 +1,11 @@ --- layout: docs -page_title: Upgrading -sidebar_title: Upgrading -description: Upgrading Consul on Kubernetes +page_title: Upgrade +sidebar_title: Upgrade +description: Upgrade Consul on Kubernetes --- -# Upgrading Consul on Kubernetes +# Upgrade Consul on Kubernetes To upgrade Consul on Kubernetes, we follow the same pattern as [generally upgrading Consul](/docs/upgrading), except we can use diff --git a/website/pages/docs/acl/acl-legacy.mdx b/website/pages/docs/security/acl/acl-legacy.mdx similarity index 100% rename from website/pages/docs/acl/acl-legacy.mdx rename to website/pages/docs/security/acl/acl-legacy.mdx diff --git a/website/pages/docs/acl/acl-migrate-tokens.mdx b/website/pages/docs/security/acl/acl-migrate-tokens.mdx similarity index 100% rename from website/pages/docs/acl/acl-migrate-tokens.mdx rename to website/pages/docs/security/acl/acl-migrate-tokens.mdx diff --git a/website/pages/docs/acl/acl-rules.mdx b/website/pages/docs/security/acl/acl-rules.mdx similarity index 100% rename from website/pages/docs/acl/acl-rules.mdx rename to website/pages/docs/security/acl/acl-rules.mdx diff --git a/website/pages/docs/acl/acl-system.mdx b/website/pages/docs/security/acl/acl-system.mdx similarity index 99% rename from website/pages/docs/acl/acl-system.mdx rename to website/pages/docs/security/acl/acl-system.mdx index 92a464a93..a0a0579c9 100644 --- a/website/pages/docs/acl/acl-system.mdx +++ b/website/pages/docs/security/acl/acl-system.mdx @@ -75,7 +75,7 @@ An ACL policy is a named set of rules and is composed of the following elements: - **Datacenters** - A list of datacenters the policy is valid within. - **Namespace** - - The namespace this policy resides within. 
(Added in Consul Enterprise 1.7.0) --> **Consul Enterprise Namespacing** - Rules defined in a policy in any namespace other than `default` will be [restricted](/docs/acl/acl-rules#namespace-rules-enterprise) to being able to grant a subset of the overall privileges and only affecting that single namespace. +-> **Consul Enterprise Namespacing** - Rules defined in a policy in any namespace other than `default` will be [restricted](/docs/acl/acl-rules#namespace-rules) to being able to grant a subset of the overall privileges and only affecting that single namespace. #### Builtin Policies @@ -292,7 +292,7 @@ system, or accessing Consul in special situations: | ------------------------------------------------------------------------ | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [`acl.tokens.agent_master`](/docs/agent/options#acl_tokens_agent_master) | `OPTIONAL` | `OPTIONAL` | Special token that can be used to access [Agent API](/api/agent) when remote bearer token resolution fails; used for setting up the cluster such as doing initial join operations, see the [ACL Agent Master Token](#acl-agent-master-token) section for more details | | [`acl.tokens.agent`](/docs/agent/options#acl_tokens_agent) | `OPTIONAL` | `OPTIONAL` | Special token that is used for an agent's internal operations, see the [ACL Agent Token](#acl-agent-token) section for more details | -| [`acl.tokens.master`](/docs/agent/options#acl_tokens_master) | `OPTIONAL` | `N/A` | Special token used to bootstrap the ACL system, check the [Bootstrapping ACLs](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) tutorial for more details | +| [`acl.tokens.master`](/docs/agent/options#acl_tokens_master) | `OPTIONAL` | `N/A` | Special token used 
to bootstrap the ACL system, check the [Bootstrapping ACLs](https://learn.hashicorp.com/tutorials/consul/access-control-setup-production) tutorial for more details | | [`acl.tokens.default`](/docs/agent/options#acl_tokens_default) | `OPTIONAL` | `OPTIONAL` | Default token to use for client requests where no token is supplied; this is often configured with read-only access to services to enable DNS service discovery on agents | All of these tokens except the `master` token can all be introduced or updated via the [/v1/agent/token API](/api/agent#update-acl-tokens). diff --git a/website/pages/docs/acl/auth-methods/index.mdx b/website/pages/docs/security/acl/auth-methods/index.mdx similarity index 100% rename from website/pages/docs/acl/auth-methods/index.mdx rename to website/pages/docs/security/acl/auth-methods/index.mdx diff --git a/website/pages/docs/acl/auth-methods/jwt.mdx b/website/pages/docs/security/acl/auth-methods/jwt.mdx similarity index 100% rename from website/pages/docs/acl/auth-methods/jwt.mdx rename to website/pages/docs/security/acl/auth-methods/jwt.mdx diff --git a/website/pages/docs/acl/auth-methods/kubernetes.mdx b/website/pages/docs/security/acl/auth-methods/kubernetes.mdx similarity index 100% rename from website/pages/docs/acl/auth-methods/kubernetes.mdx rename to website/pages/docs/security/acl/auth-methods/kubernetes.mdx diff --git a/website/pages/docs/acl/auth-methods/oidc.mdx b/website/pages/docs/security/acl/auth-methods/oidc.mdx similarity index 100% rename from website/pages/docs/acl/auth-methods/oidc.mdx rename to website/pages/docs/security/acl/auth-methods/oidc.mdx diff --git a/website/pages/docs/acl/index.mdx b/website/pages/docs/security/acl/index.mdx similarity index 100% rename from website/pages/docs/acl/index.mdx rename to website/pages/docs/security/acl/index.mdx diff --git a/website/pages/docs/agent/encryption.mdx b/website/pages/docs/security/encryption.mdx similarity index 100% rename from 
website/pages/docs/agent/encryption.mdx rename to website/pages/docs/security/encryption.mdx diff --git a/website/pages/docs/internals/security.mdx b/website/pages/docs/security/index.mdx similarity index 99% rename from website/pages/docs/internals/security.mdx rename to website/pages/docs/security/index.mdx index d6bd6dfae..74ddfbc5b 100644 --- a/website/pages/docs/internals/security.mdx +++ b/website/pages/docs/security/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Security Model -sidebar_title: Security Model +page_title: Security +sidebar_title: Security description: >- Consul relies on both a lightweight gossip mechanism and an RPC system to provide various features. Both of the systems have different security diff --git a/website/pages/docs/common-errors.mdx b/website/pages/docs/troubleshoot/common-errors.mdx similarity index 100% rename from website/pages/docs/common-errors.mdx rename to website/pages/docs/troubleshoot/common-errors.mdx diff --git a/website/pages/docs/faq.mdx b/website/pages/docs/troubleshoot/faq.mdx similarity index 100% rename from website/pages/docs/faq.mdx rename to website/pages/docs/troubleshoot/faq.mdx diff --git a/website/pages/docs/upgrading/index.mdx b/website/pages/docs/upgrading/index.mdx index 4936c9844..11f1bb62c 100644 --- a/website/pages/docs/upgrading/index.mdx +++ b/website/pages/docs/upgrading/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Upgrading Consul -sidebar_title: Upgrading +page_title: Upgrade Consul +sidebar_title: Upgrade description: >- Consul is meant to be a long-running agent on any nodes participating in a Consul cluster. These nodes consistently communicate with each other. As such, diff --git a/website/pages/downloads/index.jsx b/website/pages/downloads/index.jsx index 7e5b61d7f..c925bcb80 100644 --- a/website/pages/downloads/index.jsx +++ b/website/pages/downloads/index.jsx @@ -13,7 +13,7 @@ export default function DownloadsPage({ releaseData }) { releaseData={releaseData} >

- » Download Consul Tools + » Download Consul Tools

Note for ARM users:

diff --git a/website/pages/intro/getting-started/agent.mdx b/website/pages/intro/getting-started/agent.mdx index 5e4ec3a19..0fa1a4f27 100644 --- a/website/pages/intro/getting-started/agent.mdx +++ b/website/pages/intro/getting-started/agent.mdx @@ -120,7 +120,7 @@ $ dig @127.0.0.1 -p 8600 Armons-MacBook-Air.node.consul Armons-MacBook-Air.node.consul. 0 IN A 127.0.0.1 ``` -## Stopping the Agent +## Stopping the Agent ((#stopping)) You can use `Ctrl-C` (the interrupt signal) to gracefully halt the agent. After interrupting the agent, you should see it leave the cluster diff --git a/website/pages/use-cases/multi-platform-service-mesh.jsx b/website/pages/use-cases/multi-platform-service-mesh.jsx index 55cfbb661..059e23aff 100644 --- a/website/pages/use-cases/multi-platform-service-mesh.jsx +++ b/website/pages/use-cases/multi-platform-service-mesh.jsx @@ -58,8 +58,7 @@ export default function MultiPlatformServiceMeshPage() { links: [ { text: 'Learn More', - url: - 'https://www.consul.io/docs/connect/l7-traffic-management.html', + url: '/docs/connect/l7-traffic-management', type: 'outbound', }, ], @@ -91,7 +90,7 @@ Splits = [ links: [ { text: 'Learn More', - url: 'https://www.consul.io/docs/platform/k8s/run.html', + url: '/docs/platform/k8s/run', type: 'inbound', }, ], @@ -147,7 +146,7 @@ Splits = [ links: [ { text: 'Learn More', - url: 'https://www.consul.io/docs/enterprise/index.html', + url: '/docs/enterprise', type: 'inbound', }, ], diff --git a/website/pages/use-cases/network-infrastructure-automation.jsx b/website/pages/use-cases/network-infrastructure-automation.jsx index 94ae4accc..b90443b68 100644 --- a/website/pages/use-cases/network-infrastructure-automation.jsx +++ b/website/pages/use-cases/network-infrastructure-automation.jsx @@ -37,7 +37,7 @@ export default function NetworkInfrastructureAutomationPage() { links: [ { text: 'Read More', - url: 'https://www.consul.io/docs/partnerships/index.html', + url: '/docs/partnerships', type: 'inbound', }, ], diff --git 
a/website/pages/use-cases/service-discovery-and-health-checking.jsx b/website/pages/use-cases/service-discovery-and-health-checking.jsx index bdced50f1..6bc3fea81 100644 --- a/website/pages/use-cases/service-discovery-and-health-checking.jsx +++ b/website/pages/use-cases/service-discovery-and-health-checking.jsx @@ -145,7 +145,7 @@ Judiths-MBP.lan.node.dc1.consul. 0 IN TXT "consul-network-segment=" links: [ { text: 'Read More', - url: 'https://www.consul.io/docs/enterprise/index.html', + url: '/docs/enterprise', type: 'inbound', }, ], From 2946d823bad590f7bb203270b8b2954ade8a3a5f Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 1 Sep 2020 19:13:11 +0100 Subject: [PATCH 17/73] ui: Improved filtering and sorting (#8591) --- .../consul-intention-search-bar/index.hbs | 74 +++++++ .../consul-intention-search-bar/index.js | 5 + .../consul-node-search-bar/index.hbs | 65 ++++++ .../consul-node-search-bar/index.js | 5 + .../consul-nspace-search-bar/index.hbs | 37 ++++ .../consul-nspace-search-bar/index.js | 5 + .../consul-policy-search-bar/index.hbs | 77 +++++++ .../consul-policy-search-bar/index.js | 3 + .../consul-role-search-bar/index.hbs | 43 ++++ .../consul-role-search-bar/index.js | 3 + .../index.hbs | 86 ++++++++ .../index.js | 5 + .../consul-service-search-bar/index.hbs | 111 ++++++++++ .../consul-service-search-bar/index.js | 5 + .../consul-token-search-bar/index.hbs | 57 +++++ .../consul-token-search-bar/index.js | 3 + .../consul-upstream-search-bar/index.hbs | 62 ++++++ .../consul-upstream-search-bar/index.js | 5 + ui-v2/app/components/popover-select/index.js | 2 +- .../popover-select/option/index.hbs | 1 + .../app/controllers/dc/acls/policies/index.js | 2 + ui-v2/app/controllers/dc/intentions/index.js | 2 + ui-v2/app/controllers/dc/nodes/index.js | 1 + ui-v2/app/controllers/dc/services/index.js | 10 + .../controllers/dc/services/show/instances.js | 10 + .../controllers/dc/services/show/services.js | 12 ++ .../controllers/dc/services/show/upstreams.js | 12 ++ 
ui-v2/app/filter/predicates/intention.js | 9 + ui-v2/app/filter/predicates/node.js | 8 + ui-v2/app/filter/predicates/policy.js | 28 +++ .../app/filter/predicates/service-instance.js | 19 ++ ui-v2/app/filter/predicates/service.js | 67 ++++++ ui-v2/app/filter/predicates/token.js | 21 ++ ui-v2/app/helpers/filter-predicate.js | 9 + ui-v2/app/helpers/policy/group.js | 3 +- ui-v2/app/initializers/sort.js | 2 + ui-v2/app/models/policy.js | 6 + ui-v2/app/models/service-instance.js | 46 +++- ui-v2/app/models/token.js | 5 + ui-v2/app/services/filter.js | 23 ++ ui-v2/app/services/repository/intention.js | 2 +- .../app/sort/comparators/service-instance.js | 23 ++ .../styles/base/components/buttons/index.scss | 3 - .../base/components/popover-menu/skin.scss | 1 - .../styles/base/icons/icon-placeholders.scss | 28 +++ ui-v2/app/styles/components/app-view.scss | 4 +- ui-v2/app/styles/components/filter-bar.scss | 15 +- .../styles/components/filter-bar/layout.scss | 36 +++- .../styles/components/filter-bar/skin.scss | 3 - .../main-nav-horizontal/layout.scss | 3 + .../app/styles/components/popover-select.scss | 58 ++++++ .../app/templates/dc/acls/policies/index.hbs | 195 ++++++++--------- ui-v2/app/templates/dc/acls/roles/index.hbs | 45 +--- ui-v2/app/templates/dc/acls/tokens/index.hbs | 196 ++++++++---------- ui-v2/app/templates/dc/intentions/index.hbs | 90 +++----- ui-v2/app/templates/dc/kv/index.hbs | 82 ++++---- ui-v2/app/templates/dc/nodes/index.hbs | 66 ++---- ui-v2/app/templates/dc/nspaces/index.hbs | 42 +--- ui-v2/app/templates/dc/services/index.hbs | 73 +++---- .../templates/dc/services/show/instances.hbs | 37 +++- .../dc/services/show/intentions/index.hbs | 88 +++----- .../templates/dc/services/show/services.hbs | 64 ++++-- .../templates/dc/services/show/upstreams.hbs | 53 ++++- .../dc/acls/policies/sorting.feature | 72 +++---- .../tests/acceptance/dc/nodes/sorting.feature | 146 ++++++------- .../dc/services/show/upstreams.feature | 1 + 
.../acceptance/dc/services/sorting.feature | 32 +-- ui-v2/tests/pages/dc/acls/policies/index.js | 2 +- ui-v2/tests/pages/dc/acls/roles/index.js | 2 +- ui-v2/tests/pages/dc/acls/tokens/index.js | 2 +- ui-v2/tests/pages/dc/intentions/index.js | 2 +- ui-v2/tests/pages/dc/nodes/index.js | 2 +- ui-v2/tests/pages/dc/nspaces/index.js | 2 +- ui-v2/tests/pages/dc/services/index.js | 2 +- ui-v2/tests/pages/dc/services/show.js | 2 +- .../unit/filter/predicates/intention-test.js | 43 ++++ .../unit/filter/predicates/service-test.js | 171 +++++++++++++++ 77 files changed, 1905 insertions(+), 732 deletions(-) create mode 100644 ui-v2/app/components/consul-intention-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-intention-search-bar/index.js create mode 100644 ui-v2/app/components/consul-node-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-node-search-bar/index.js create mode 100644 ui-v2/app/components/consul-nspace-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-nspace-search-bar/index.js create mode 100644 ui-v2/app/components/consul-policy-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-policy-search-bar/index.js create mode 100644 ui-v2/app/components/consul-role-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-role-search-bar/index.js create mode 100644 ui-v2/app/components/consul-service-instance-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-service-instance-search-bar/index.js create mode 100644 ui-v2/app/components/consul-service-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-service-search-bar/index.js create mode 100644 ui-v2/app/components/consul-token-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-token-search-bar/index.js create mode 100644 ui-v2/app/components/consul-upstream-search-bar/index.hbs create mode 100644 ui-v2/app/components/consul-upstream-search-bar/index.js create mode 100644 
ui-v2/app/controllers/dc/services/show/services.js create mode 100644 ui-v2/app/controllers/dc/services/show/upstreams.js create mode 100644 ui-v2/app/filter/predicates/intention.js create mode 100644 ui-v2/app/filter/predicates/node.js create mode 100644 ui-v2/app/filter/predicates/policy.js create mode 100644 ui-v2/app/filter/predicates/service-instance.js create mode 100644 ui-v2/app/filter/predicates/service.js create mode 100644 ui-v2/app/filter/predicates/token.js create mode 100644 ui-v2/app/helpers/filter-predicate.js create mode 100644 ui-v2/app/services/filter.js create mode 100644 ui-v2/app/sort/comparators/service-instance.js create mode 100644 ui-v2/tests/unit/filter/predicates/intention-test.js create mode 100644 ui-v2/tests/unit/filter/predicates/service-test.js diff --git a/ui-v2/app/components/consul-intention-search-bar/index.hbs b/ui-v2/app/components/consul-intention-search-bar/index.hbs new file mode 100644 index 000000000..c848f9a51 --- /dev/null +++ b/ui-v2/app/components/consul-intention-search-bar/index.hbs @@ -0,0 +1,74 @@ +
+ +
+ + + + Permissions + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + +{{/let}} + + +
+
+ + + + {{#let (from-entries (array + (array "Action:asc" "Allow to Deny") + (array "Action:desc" "Deny to Allow") + (array "SourceName:asc" "Source: A to Z") + (array "SourceName:desc" "Source: Z to A") + (array "DestinationName:asc" "Destination: A to Z") + (array "DestinationName:desc" "Destination: Z to A") + (array "Precedence:asc" "Precedence: Ascending") + (array "Precedence:desc" "Precedence: Descending") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + + + + + + + + + +{{/let}} + + +
+ \ No newline at end of file diff --git a/ui-v2/app/components/consul-intention-search-bar/index.js b/ui-v2/app/components/consul-intention-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-intention-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/consul-node-search-bar/index.hbs b/ui-v2/app/components/consul-node-search-bar/index.hbs new file mode 100644 index 000000000..82814f07a --- /dev/null +++ b/ui-v2/app/components/consul-node-search-bar/index.hbs @@ -0,0 +1,65 @@ +
+ +
+ + + + Health Status + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + +
+
+ + + + {{#let (from-entries (array + (array "Node:asc" "A to Z") + (array "Node:desc" "Z to A") + (array "Status:asc" "Unhealthy to Healthy") + (array "Status:desc" "Healthy to Unhealthy") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + + +
+ \ No newline at end of file diff --git a/ui-v2/app/components/consul-node-search-bar/index.js b/ui-v2/app/components/consul-node-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-node-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/consul-nspace-search-bar/index.hbs b/ui-v2/app/components/consul-nspace-search-bar/index.hbs new file mode 100644 index 000000000..f7a5beca9 --- /dev/null +++ b/ui-v2/app/components/consul-nspace-search-bar/index.hbs @@ -0,0 +1,37 @@ +
+ +
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + +
+ \ No newline at end of file diff --git a/ui-v2/app/components/consul-nspace-search-bar/index.js b/ui-v2/app/components/consul-nspace-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-nspace-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/consul-policy-search-bar/index.hbs b/ui-v2/app/components/consul-policy-search-bar/index.hbs new file mode 100644 index 000000000..88e348200 --- /dev/null +++ b/ui-v2/app/components/consul-policy-search-bar/index.hbs @@ -0,0 +1,77 @@ +
+ +
+ + + + Datacenters + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + {{#each dcs as |dc|}} + + {{/each}} +{{/let}} + + + + + + + Type + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + +{{/let}} + + +
+
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-policy-search-bar/index.js b/ui-v2/app/components/consul-policy-search-bar/index.js new file mode 100644 index 000000000..557064773 --- /dev/null +++ b/ui-v2/app/components/consul-policy-search-bar/index.js @@ -0,0 +1,3 @@ +import Component from '@ember/component'; + +export default Component.extend({}); diff --git a/ui-v2/app/components/consul-role-search-bar/index.hbs b/ui-v2/app/components/consul-role-search-bar/index.hbs new file mode 100644 index 000000000..359f672c4 --- /dev/null +++ b/ui-v2/app/components/consul-role-search-bar/index.hbs @@ -0,0 +1,43 @@ +
+ +
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + (array "CreateIndex:desc" "Newest to oldest") + (array "CreateIndex:asc" "Oldest to newest") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-role-search-bar/index.js b/ui-v2/app/components/consul-role-search-bar/index.js new file mode 100644 index 000000000..557064773 --- /dev/null +++ b/ui-v2/app/components/consul-role-search-bar/index.js @@ -0,0 +1,3 @@ +import Component from '@ember/component'; + +export default Component.extend({}); diff --git a/ui-v2/app/components/consul-service-instance-search-bar/index.hbs b/ui-v2/app/components/consul-service-instance-search-bar/index.hbs new file mode 100644 index 000000000..95df8a292 --- /dev/null +++ b/ui-v2/app/components/consul-service-instance-search-bar/index.hbs @@ -0,0 +1,86 @@ +
+ +
+ + + + Health Status + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + +{{#if (gt sources.length 0)}} + + + + Source + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} +{{#each sources as |source|}} + +{{/each}} +{{/let}} + + +{{/if}} +
+
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + (array "Status:asc" "Unhealthy to Healthy") + (array "Status:desc" "Healthy to Unhealthy") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-service-instance-search-bar/index.js b/ui-v2/app/components/consul-service-instance-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-service-instance-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/consul-service-search-bar/index.hbs b/ui-v2/app/components/consul-service-search-bar/index.hbs new file mode 100644 index 000000000..9013c715f --- /dev/null +++ b/ui-v2/app/components/consul-service-search-bar/index.hbs @@ -0,0 +1,111 @@ +
+ +
+ + + + Health Status + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + + + + + Service Type + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + + + +{{/let}} + + +{{#if (gt sources.length 0)}} + + + + Source + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} +{{#each sources as |source|}} + +{{/each}} +{{/let}} + + +{{/if}} +
+
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + (array "Status:asc" "Unhealthy to Healthy") + (array "Status:desc" "Healthy to Unhealthy") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-service-search-bar/index.js b/ui-v2/app/components/consul-service-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-service-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/consul-token-search-bar/index.hbs b/ui-v2/app/components/consul-token-search-bar/index.hbs new file mode 100644 index 000000000..87fdfe920 --- /dev/null +++ b/ui-v2/app/components/consul-token-search-bar/index.hbs @@ -0,0 +1,57 @@ +
+ +
+ + + + Type + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + +{{/let}} + + +
+
+ + + + {{#let (from-entries (array + (array "CreateTime:desc" "Newest to oldest") + (array "CreateTime:asc" "Oldest to newest") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-token-search-bar/index.js b/ui-v2/app/components/consul-token-search-bar/index.js new file mode 100644 index 000000000..557064773 --- /dev/null +++ b/ui-v2/app/components/consul-token-search-bar/index.js @@ -0,0 +1,3 @@ +import Component from '@ember/component'; + +export default Component.extend({}); diff --git a/ui-v2/app/components/consul-upstream-search-bar/index.hbs b/ui-v2/app/components/consul-upstream-search-bar/index.hbs new file mode 100644 index 000000000..d8a4af87b --- /dev/null +++ b/ui-v2/app/components/consul-upstream-search-bar/index.hbs @@ -0,0 +1,62 @@ +
+ +
+ + + + Type + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + +{{/let}} + + +
+
+ + + + {{#let (from-entries (array + (array "Name:asc" "A to Z") + (array "Name:desc" "Z to A") + (array "Status:asc" "Unhealthy to Healthy") + (array "Status:desc" "Healthy to Unhealthy") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + +{{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + +{{/let}} + + +
+ diff --git a/ui-v2/app/components/consul-upstream-search-bar/index.js b/ui-v2/app/components/consul-upstream-search-bar/index.js new file mode 100644 index 000000000..479865264 --- /dev/null +++ b/ui-v2/app/components/consul-upstream-search-bar/index.js @@ -0,0 +1,5 @@ +import Component from '@ember/component'; + +export default Component.extend({ + tagName: '', +}); diff --git a/ui-v2/app/components/popover-select/index.js b/ui-v2/app/components/popover-select/index.js index 432424945..5c63497fb 100644 --- a/ui-v2/app/components/popover-select/index.js +++ b/ui-v2/app/components/popover-select/index.js @@ -6,7 +6,7 @@ export default Component.extend(Slotted, { tagName: '', dom: service('dom'), multiple: false, - subtractive: true, + subtractive: false, onchange: function() {}, addOption: function(option) { if (typeof this._options === 'undefined') { diff --git a/ui-v2/app/components/popover-select/option/index.hbs b/ui-v2/app/components/popover-select/option/index.hbs index 8b2cca90e..86119b1e9 100644 --- a/ui-v2/app/components/popover-select/option/index.hbs +++ b/ui-v2/app/components/popover-select/option/index.hbs @@ -1,6 +1,7 @@ {{#let components.MenuItem as |MenuItem|}} diff --git a/ui-v2/app/controllers/dc/acls/policies/index.js b/ui-v2/app/controllers/dc/acls/policies/index.js index f99cde37f..54f6cac2d 100644 --- a/ui-v2/app/controllers/dc/acls/policies/index.js +++ b/ui-v2/app/controllers/dc/acls/policies/index.js @@ -2,6 +2,8 @@ import Controller from '@ember/controller'; export default Controller.extend({ queryParams: { sortBy: 'sort', + dc: 'dc', + type: 'type', search: { as: 'filter', replace: true, diff --git a/ui-v2/app/controllers/dc/intentions/index.js b/ui-v2/app/controllers/dc/intentions/index.js index f99cde37f..c22bc54ce 100644 --- a/ui-v2/app/controllers/dc/intentions/index.js +++ b/ui-v2/app/controllers/dc/intentions/index.js @@ -1,7 +1,9 @@ import Controller from '@ember/controller'; + export default Controller.extend({ queryParams: { 
sortBy: 'sort', + access: 'access', search: { as: 'filter', replace: true, diff --git a/ui-v2/app/controllers/dc/nodes/index.js b/ui-v2/app/controllers/dc/nodes/index.js index a33e295d3..b3f36efae 100644 --- a/ui-v2/app/controllers/dc/nodes/index.js +++ b/ui-v2/app/controllers/dc/nodes/index.js @@ -3,6 +3,7 @@ import Controller from '@ember/controller'; export default Controller.extend({ queryParams: { sortBy: 'sort', + status: 'status', search: { as: 'filter', replace: true, diff --git a/ui-v2/app/controllers/dc/services/index.js b/ui-v2/app/controllers/dc/services/index.js index deebfa8e0..ad77eb593 100644 --- a/ui-v2/app/controllers/dc/services/index.js +++ b/ui-v2/app/controllers/dc/services/index.js @@ -4,6 +4,9 @@ import { computed } from '@ember/object'; export default Controller.extend({ queryParams: { sortBy: 'sort', + status: 'status', + source: 'source', + type: 'type', search: { as: 'filter', }, @@ -13,4 +16,11 @@ export default Controller.extend({ return item.Kind !== 'connect-proxy'; }); }), + externalSources: computed('services', function() { + const sources = this.services.reduce(function(prev, item) { + return prev.concat(item.ExternalSources || []); + }, []); + // unique, non-empty values, alpha sort + return [...new Set(sources)].filter(Boolean).sort(); + }), }); diff --git a/ui-v2/app/controllers/dc/services/show/instances.js b/ui-v2/app/controllers/dc/services/show/instances.js index a33e295d3..d8a4846dc 100644 --- a/ui-v2/app/controllers/dc/services/show/instances.js +++ b/ui-v2/app/controllers/dc/services/show/instances.js @@ -1,11 +1,21 @@ import Controller from '@ember/controller'; +import { computed } from '@ember/object'; export default Controller.extend({ queryParams: { sortBy: 'sort', + status: 'status', + source: 'source', search: { as: 'filter', replace: true, }, }, + externalSources: computed('items', function() { + const sources = this.items.reduce(function(prev, item) { + return prev.concat(item.ExternalSources || []); + }, []); + 
// unique, non-empty values, alpha sort + return [...new Set(sources)].filter(Boolean).sort(); + }), }); diff --git a/ui-v2/app/controllers/dc/services/show/services.js b/ui-v2/app/controllers/dc/services/show/services.js new file mode 100644 index 000000000..b71de3054 --- /dev/null +++ b/ui-v2/app/controllers/dc/services/show/services.js @@ -0,0 +1,12 @@ +import Controller from '@ember/controller'; + +export default Controller.extend({ + queryParams: { + sortBy: 'sort', + instance: 'instance', + search: { + as: 'filter', + replace: true, + }, + }, +}); diff --git a/ui-v2/app/controllers/dc/services/show/upstreams.js b/ui-v2/app/controllers/dc/services/show/upstreams.js new file mode 100644 index 000000000..b71de3054 --- /dev/null +++ b/ui-v2/app/controllers/dc/services/show/upstreams.js @@ -0,0 +1,12 @@ +import Controller from '@ember/controller'; + +export default Controller.extend({ + queryParams: { + sortBy: 'sort', + instance: 'instance', + search: { + as: 'filter', + replace: true, + }, + }, +}); diff --git a/ui-v2/app/filter/predicates/intention.js b/ui-v2/app/filter/predicates/intention.js new file mode 100644 index 000000000..fd7e7ffce --- /dev/null +++ b/ui-v2/app/filter/predicates/intention.js @@ -0,0 +1,9 @@ +export default () => ({ accesses = [] }) => item => { + if (accesses.length > 0) { + if (accesses.includes(item.Action)) { + return true; + } + return false; + } + return true; +}; diff --git a/ui-v2/app/filter/predicates/node.js b/ui-v2/app/filter/predicates/node.js new file mode 100644 index 000000000..7effc140a --- /dev/null +++ b/ui-v2/app/filter/predicates/node.js @@ -0,0 +1,8 @@ +export default () => ({ statuses = [] }) => { + return item => { + if (statuses.length > 0 && !statuses.includes(item.Status)) { + return false; + } + return true; + }; +}; diff --git a/ui-v2/app/filter/predicates/policy.js b/ui-v2/app/filter/predicates/policy.js new file mode 100644 index 000000000..f7bd68fd5 --- /dev/null +++ b/ui-v2/app/filter/predicates/policy.js 
@@ -0,0 +1,28 @@ +import setHelpers from 'mnemonist/set'; +export default () => ({ dcs = [], types = [] }) => { + const typeIncludes = ['global-management', 'standard'].reduce((prev, item) => { + prev[item] = types.includes(item); + return prev; + }, {}); + const selectedDcs = new Set(dcs); + return item => { + let type = true; + let dc = true; + if (types.length > 0) { + type = false; + if (typeIncludes['global-management'] && item.isGlobalManagement) { + type = true; + } + if (typeIncludes['standard'] && !item.isGlobalManagement) { + type = true; + } + } + if (dcs.length > 0) { + // if datacenters is undefined it means the policy is applicable to all datacenters + dc = + typeof item.Datacenters === 'undefined' || + setHelpers.intersectionSize(selectedDcs, new Set(item.Datacenters)) > 0; + } + return type && dc; + }; +}; diff --git a/ui-v2/app/filter/predicates/service-instance.js b/ui-v2/app/filter/predicates/service-instance.js new file mode 100644 index 000000000..09d05c130 --- /dev/null +++ b/ui-v2/app/filter/predicates/service-instance.js @@ -0,0 +1,19 @@ +import setHelpers from 'mnemonist/set'; +export default () => ({ sources = [], statuses = [] }) => { + const uniqueSources = new Set(sources); + return item => { + if (statuses.length > 0) { + if (statuses.includes(item.Status)) { + return true; + } + return false; + } + if (sources.length > 0) { + if (setHelpers.intersectionSize(uniqueSources, new Set(item.ExternalSources || [])) !== 0) { + return true; + } + return false; + } + return true; + }; +}; diff --git a/ui-v2/app/filter/predicates/service.js b/ui-v2/app/filter/predicates/service.js new file mode 100644 index 000000000..9aa5d4f09 --- /dev/null +++ b/ui-v2/app/filter/predicates/service.js @@ -0,0 +1,67 @@ +import setHelpers from 'mnemonist/set'; +export default () => ({ instances = [], sources = [], statuses = [], types = [] }) => { + const uniqueSources = new Set(sources); + const typeIncludes = [ + 'ingress-gateway', + 'terminating-gateway', + 
'mesh-gateway', + 'service', + 'mesh-enabled', + 'mesh-disabled', + ].reduce((prev, item) => { + prev[item] = types.includes(item); + return prev; + }, {}); + const instanceIncludes = ['registered', 'not-registered'].reduce((prev, item) => { + prev[item] = instances.includes(item); + return prev; + }, {}); + return item => { + if (statuses.length > 0) { + if (statuses.includes(item.MeshStatus)) { + return true; + } + return false; + } + if (instances.length > 0) { + if (item.InstanceCount > 0) { + if (instanceIncludes['registered']) { + return true; + } + } else { + if (instanceIncludes['not-registered']) { + return true; + } + } + return false; + } + if (types.length > 0) { + if (typeIncludes['ingress-gateway'] && item.Kind === 'ingress-gateway') { + return true; + } + if (typeIncludes['terminating-gateway'] && item.Kind === 'terminating-gateway') { + return true; + } + if (typeIncludes['mesh-gateway'] && item.Kind === 'mesh-gateway') { + return true; + } + if (typeIncludes['service'] && typeof item.Kind === 'undefined') { + return true; + } + if (typeIncludes['mesh-enabled'] && typeof item.Proxy !== 'undefined') { + return true; + } + if (typeIncludes['mesh-disabled'] && typeof item.Proxy === 'undefined') { + return true; + } + return false; + } + if (sources.length > 0) { + if (setHelpers.intersectionSize(uniqueSources, new Set(item.ExternalSources || [])) !== 0) { + return true; + } + return false; + } + return true; + }; +}; diff --git a/ui-v2/app/filter/predicates/token.js b/ui-v2/app/filter/predicates/token.js new file mode 100644 index 000000000..e50b1f291 --- /dev/null +++ b/ui-v2/app/filter/predicates/token.js @@ -0,0 +1,21 @@ +export default () => ({ types = [] }) => { + const typeIncludes = ['global-management', 'global', 'local'].reduce((prev, item) => { + prev[item] = types.includes(item); + return prev; + }, {}); + return item => { + if (types.length > 0) { + if (typeIncludes['global-management'] && item.isGlobalManagement) { + return true; + } + if 
(typeIncludes['global'] && !item.Local) { + return true; + } + if (typeIncludes['local'] && item.Local) { + return true; + } + return false; + } + return true; + }; +}; diff --git a/ui-v2/app/helpers/filter-predicate.js b/ui-v2/app/helpers/filter-predicate.js new file mode 100644 index 000000000..d90b37007 --- /dev/null +++ b/ui-v2/app/helpers/filter-predicate.js @@ -0,0 +1,9 @@ +import Helper from '@ember/component/helper'; +import { inject as service } from '@ember/service'; + +export default Helper.extend({ + filter: service('filter'), + compute([type, filters], hash) { + return this.filter.predicate(type)(filters); + }, +}); diff --git a/ui-v2/app/helpers/policy/group.js b/ui-v2/app/helpers/policy/group.js index b58ad4640..1a2c829d0 100644 --- a/ui-v2/app/helpers/policy/group.js +++ b/ui-v2/app/helpers/policy/group.js @@ -1,7 +1,6 @@ import { helper } from '@ember/component/helper'; import { get } from '@ember/object'; - -const MANAGEMENT_ID = '00000000-0000-0000-0000-000000000001'; +import { MANAGEMENT_ID } from 'consul-ui/models/policy'; export default helper(function policyGroup([items] /*, hash*/) { return items.reduce( diff --git a/ui-v2/app/initializers/sort.js b/ui-v2/app/initializers/sort.js index 6b173481b..1f30f55b0 100644 --- a/ui-v2/app/initializers/sort.js +++ b/ui-v2/app/initializers/sort.js @@ -1,4 +1,5 @@ import service from 'consul-ui/sort/comparators/service'; +import serviceInstance from 'consul-ui/sort/comparators/service-instance'; import kv from 'consul-ui/sort/comparators/kv'; import check from 'consul-ui/sort/comparators/check'; import intention from 'consul-ui/sort/comparators/intention'; @@ -13,6 +14,7 @@ export function initialize(container) { const Sort = container.resolveRegistration('service:sort'); const comparators = { service: service(), + serviceInstance: serviceInstance(), kv: kv(), check: check(), intention: intention(), diff --git a/ui-v2/app/models/policy.js b/ui-v2/app/models/policy.js index 7db67378e..b4c2661b6 100644 --- 
a/ui-v2/app/models/policy.js +++ b/ui-v2/app/models/policy.js @@ -1,9 +1,12 @@ import Model from 'ember-data/model'; import attr from 'ember-data/attr'; +import { computed } from '@ember/object'; export const PRIMARY_KEY = 'uid'; export const SLUG_KEY = 'ID'; +export const MANAGEMENT_ID = '00000000-0000-0000-0000-000000000001'; + export default Model.extend({ [PRIMARY_KEY]: attr('string'), [SLUG_KEY]: attr('string'), @@ -19,6 +22,9 @@ export default Model.extend({ // frontend only for ordering where CreateIndex can't be used CreateTime: attr('date', { defaultValue: 0 }), // + isGlobalManagement: computed('ID', function() { + return this.ID === MANAGEMENT_ID; + }), Datacenter: attr('string'), Namespace: attr('string'), SyncTime: attr('number'), diff --git a/ui-v2/app/models/service-instance.js b/ui-v2/app/models/service-instance.js index 4b8946098..9df5bd6d0 100644 --- a/ui-v2/app/models/service-instance.js +++ b/ui-v2/app/models/service-instance.js @@ -1,7 +1,8 @@ import Model from 'ember-data/model'; import attr from 'ember-data/attr'; import { belongsTo } from 'ember-data/relationships'; -import { filter, alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import { or, filter, alias } from '@ember/object/computed'; export const PRIMARY_KEY = 'uid'; export const SLUG_KEY = 'Node.Node,Service.ID'; @@ -19,13 +20,52 @@ export default Model.extend({ Checks: attr(), SyncTime: attr('number'), meta: attr(), + Name: or('Service.ID', 'Service.Service'), Tags: alias('Service.Tags'), Meta: alias('Service.Meta'), Namespace: alias('Service.Namespace'), - ServiceChecks: filter('Checks', function(item, i, arr) { + ExternalSources: computed('Service.Meta', function() { + const sources = Object.entries(this.Service.Meta || {}) + .filter(([key, value]) => key === 'external-source') + .map(([key, value]) => { + return value; + }); + return [...new Set(sources)]; + }), + ServiceChecks: filter('Checks.[]', function(item, i, arr) { return item.ServiceID 
!== ''; }), - NodeChecks: filter('Checks', function(item, i, arr) { + NodeChecks: filter('Checks.[]', function(item, i, arr) { return item.ServiceID === ''; }), + Status: computed('ChecksPassing', 'ChecksWarning', 'ChecksCritical', function() { + switch (true) { + case this.ChecksCritical.length !== 0: + return 'critical'; + case this.ChecksWarning.length !== 0: + return 'warning'; + case this.ChecksPassing.length !== 0: + return 'passing'; + default: + return 'empty'; + } + }), + ChecksPassing: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'passing'); + }), + ChecksWarning: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'warning'); + }), + ChecksCritical: computed('Checks.[]', function() { + return this.Checks.filter(item => item.Status === 'critical'); + }), + PercentageChecksPassing: computed('Checks.[]', 'ChecksPassing', function() { + return (this.ChecksPassing.length / this.Checks.length) * 100; + }), + PercentageChecksWarning: computed('Checks.[]', 'ChecksWarning', function() { + return (this.ChecksWarning.length / this.Checks.length) * 100; + }), + PercentageChecksCritical: computed('Checks.[]', 'ChecksCritical', function() { + return (this.ChecksCritical.length / this.Checks.length) * 100; + }), }); diff --git a/ui-v2/app/models/token.js b/ui-v2/app/models/token.js index 5b5707287..9015f7362 100644 --- a/ui-v2/app/models/token.js +++ b/ui-v2/app/models/token.js @@ -1,5 +1,7 @@ import Model from 'ember-data/model'; import attr from 'ember-data/attr'; +import { computed } from '@ember/object'; +import { MANAGEMENT_ID } from 'consul-ui/models/policy'; export const PRIMARY_KEY = 'uid'; export const SLUG_KEY = 'AccessorID'; @@ -24,6 +26,9 @@ export default Model.extend({ Datacenter: attr('string'), Namespace: attr('string'), Local: attr('boolean'), + isGlobalManagement: computed('Policies.[]', function() { + return (this.Policies || []).find(item => item.ID === MANAGEMENT_ID); + 
}), Policies: attr({ defaultValue: function() { return []; diff --git a/ui-v2/app/services/filter.js b/ui-v2/app/services/filter.js new file mode 100644 index 000000000..5123a617c --- /dev/null +++ b/ui-v2/app/services/filter.js @@ -0,0 +1,23 @@ +import Service from '@ember/service'; + +import service from 'consul-ui/filter/predicates/service'; +import serviceInstance from 'consul-ui/filter/predicates/service-instance'; +import node from 'consul-ui/filter/predicates/node'; +import intention from 'consul-ui/filter/predicates/intention'; +import token from 'consul-ui/filter/predicates/token'; +import policy from 'consul-ui/filter/predicates/policy'; + +const predicates = { + service: service(), + serviceInstance: serviceInstance(), + node: node(), + intention: intention(), + token: token(), + policy: policy(), +}; + +export default Service.extend({ + predicate: function(type) { + return predicates[type]; + }, +}); diff --git a/ui-v2/app/services/repository/intention.js b/ui-v2/app/services/repository/intention.js index 72b8a6227..e2ad0e697 100644 --- a/ui-v2/app/services/repository/intention.js +++ b/ui-v2/app/services/repository/intention.js @@ -16,7 +16,7 @@ export default RepositoryService.extend({ const query = { dc: dc, nspace: nspace, - filter: `SourceName == "${slug}" or DestinationName == "${slug}"`, + filter: `SourceName == "${slug}" or DestinationName == "${slug}" or SourceName == "*" or DestinationName == "*"`, }; if (typeof configuration.cursor !== 'undefined') { query.index = configuration.cursor; diff --git a/ui-v2/app/sort/comparators/service-instance.js b/ui-v2/app/sort/comparators/service-instance.js new file mode 100644 index 000000000..66f40eb16 --- /dev/null +++ b/ui-v2/app/sort/comparators/service-instance.js @@ -0,0 +1,23 @@ +export default () => key => { + if (key.startsWith('Status:')) { + const [, dir] = key.split(':'); + const props = [ + 'PercentageChecksPassing', + 'PercentageChecksWarning', + 'PercentageChecksCritical', + ]; + if (dir === 
'asc') { + props.reverse(); + } + return function(a, b) { + for (let i in props) { + let prop = props[i]; + if (a[prop] === b[prop]) { + continue; + } + return a[prop] > b[prop] ? -1 : 1; + } + }; + } + return key; +}; diff --git a/ui-v2/app/styles/base/components/buttons/index.scss b/ui-v2/app/styles/base/components/buttons/index.scss index 8f5aa10dd..bc1825219 100644 --- a/ui-v2/app/styles/base/components/buttons/index.scss +++ b/ui-v2/app/styles/base/components/buttons/index.scss @@ -1,5 +1,2 @@ @import './skin'; @import './layout'; -%sort-button { - @extend %split-button; -} diff --git a/ui-v2/app/styles/base/components/popover-menu/skin.scss b/ui-v2/app/styles/base/components/popover-menu/skin.scss index 917bc3560..8dc9f6d15 100644 --- a/ui-v2/app/styles/base/components/popover-menu/skin.scss +++ b/ui-v2/app/styles/base/components/popover-menu/skin.scss @@ -6,7 +6,6 @@ width: 16px; height: 16px; position: relative; - top: 2px; } %popover-menu-toggle:checked + label > *::after { @extend %with-chevron-up-mask; diff --git a/ui-v2/app/styles/base/icons/icon-placeholders.scss b/ui-v2/app/styles/base/icons/icon-placeholders.scss index 907ab91d6..6f7988dd3 100644 --- a/ui-v2/app/styles/base/icons/icon-placeholders.scss +++ b/ui-v2/app/styles/base/icons/icon-placeholders.scss @@ -948,6 +948,16 @@ mask-image: $logo-bitbucket-monochrome-svg; } +%with-logo-consul-color-icon { + @extend %with-icon; + background-image: $consul-logo-color-svg; +} +%with-logo-consul-color-mask { + @extend %with-mask; + -webkit-mask-image: $consul-logo-color-svg; + mask-image: $consul-logo-color-svg; +} + %with-logo-gcp-color-icon { @extend %with-icon; background-image: $logo-gcp-color-svg; @@ -1047,6 +1057,15 @@ -webkit-mask-image: $logo-microsoft-color-svg; mask-image: $logo-microsoft-color-svg; } +%with-logo-nomad-color-icon { + @extend %with-icon; + background-image: $nomad-logo-color-svg; +} +%with-logo-nomad-color-mask { + @extend %with-mask; + -webkit-mask-image: $nomad-logo-color-svg; 
+ mask-image: $nomad-logo-color-svg; +} %with-logo-okta-color-icon { @extend %with-icon; @@ -1098,6 +1117,15 @@ mask-image: $logo-slack-monochrome-svg; } +%with-logo-terraform-color-icon { + @extend %with-icon; + background-image: $terraform-logo-color-svg; +} +%with-logo-terraform-color-mask { + @extend %with-mask; + -webkit-mask-image: $terraform-logo-color-svg; + mask-image: $terraform-logo-color-svg; +} %with-logo-vmware-color-icon { @extend %with-icon; background-image: $logo-vmware-color-svg; diff --git a/ui-v2/app/styles/components/app-view.scss b/ui-v2/app/styles/components/app-view.scss index 650b6a87c..4f605fb17 100644 --- a/ui-v2/app/styles/components/app-view.scss +++ b/ui-v2/app/styles/components/app-view.scss @@ -72,10 +72,10 @@ main { display: none; } #toolbar-toggle:checked + * { - display: block; + display: flex; } html.template-service.template-show #toolbar-toggle + * { - display: block; + display: flex; padding: 4px; } html.template-service.template-show .actions { diff --git a/ui-v2/app/styles/components/filter-bar.scss b/ui-v2/app/styles/components/filter-bar.scss index c27212a63..935cfe6a6 100644 --- a/ui-v2/app/styles/components/filter-bar.scss +++ b/ui-v2/app/styles/components/filter-bar.scss @@ -3,14 +3,18 @@ .filter-bar { @extend %filter-bar; } -%filter-bar { +%filter-bar .popover-select { + height: 35px; position: relative; z-index: 3; } -%filter-bar:not(.with-sort) { +%filter-bar [role='menuitem'] { + justify-content: normal !important; +} +html.template-acl.template-list .filter-bar { @extend %filter-bar-reversed; } -%filter-bar [role='radiogroup'] { +html.template-acl.template-list .filter-bar [role='radiogroup'] { @extend %expanded-single-select; } %filter-bar span::before { @@ -19,6 +23,11 @@ margin-left: -2px; } +%filter-bar .popover-menu > [type='checkbox']:checked + label button { + color: $blue-500; + background-color: $gray-100; +} + %filter-bar .value-passing span::before { @extend %with-check-circle-fill-icon, %as-pseudo; } 
diff --git a/ui-v2/app/styles/components/filter-bar/layout.scss b/ui-v2/app/styles/components/filter-bar/layout.scss index 563b0d067..5a08de6e1 100644 --- a/ui-v2/app/styles/components/filter-bar/layout.scss +++ b/ui-v2/app/styles/components/filter-bar/layout.scss @@ -5,34 +5,50 @@ margin-top: 0 !important; margin-bottom: -12px; } +%filter-bar .filters { + display: flex; + margin-right: 12px; +} +%filter-bar .filters > *:not(:last-child) { + margin-right: 6px; +} %filter-bar + :not(.notice) { margin-top: 1.8em; } +%filter-bar fieldset { + flex: 0 1 auto; + width: auto; +} %filter-bar-reversed { flex-direction: row-reverse; padding: 4px; margin-bottom: 8px !important; } -%filter-bar fieldset { - flex: 0 1 auto; +%filter-bar-reversed fieldset { + min-width: 210px; width: auto; } -%filter-bar fieldset:first-child:not(:last-child) { - flex: 1 1 auto; - margin-right: 12px; -} %filter-bar-reversed fieldset:first-child:not(:last-child) { flex: 0 1 auto; margin-left: auto; } -%filter-bar-reversed fieldset { - min-width: 210px; - width: auto; -} %filter-bar-reversed > *:first-child { margin-left: 12px; } +@media #{$--horizontal-filters} { + %filter-bar fieldset:first-child:not(:last-child) { + flex: 1 1 auto; + margin-right: 12px; + } +} @media #{$--lt-horizontal-filters} { + %filter-bar { + flex-wrap: wrap; + } + %filter-bar fieldset { + flex: 0 1 100%; + margin-bottom: 8px; + } %filter-bar-reversed > *:first-child { margin-left: 0; } diff --git a/ui-v2/app/styles/components/filter-bar/skin.scss b/ui-v2/app/styles/components/filter-bar/skin.scss index f6090f4b7..f0998780a 100644 --- a/ui-v2/app/styles/components/filter-bar/skin.scss +++ b/ui-v2/app/styles/components/filter-bar/skin.scss @@ -13,9 +13,6 @@ } } @media #{$--lt-horizontal-selects} { - %filter-bar label:not(:last-child) { - border-bottom: $decor-border-100; - } } %filter-bar [role='radiogroup'] label { cursor: pointer; diff --git a/ui-v2/app/styles/components/main-nav-horizontal/layout.scss 
b/ui-v2/app/styles/components/main-nav-horizontal/layout.scss index 04a369090..534ad871e 100644 --- a/ui-v2/app/styles/components/main-nav-horizontal/layout.scss +++ b/ui-v2/app/styles/components/main-nav-horizontal/layout.scss @@ -14,6 +14,9 @@ right: auto; top: 28px !important; } +%main-nav-horizontal .popover-menu > label > button::after { + top: 2px; +} @media #{$--horizontal-nav} { %main-nav-horizontal > ul, %main-nav-horizontal-panel { diff --git a/ui-v2/app/styles/components/popover-select.scss b/ui-v2/app/styles/components/popover-select.scss index 928eaf81a..7cc3675e4 100644 --- a/ui-v2/app/styles/components/popover-select.scss +++ b/ui-v2/app/styles/components/popover-select.scss @@ -1,6 +1,64 @@ .popover-select { @extend %popover-select; } +%popover-select label { + height: 100%; +} %popover-select label > * { + @extend %button; + padding: 0 8px !important; + height: 100% !important; + justify-content: space-between !important; + min-width: auto !important; +} +%popover-select label > *::after { + margin-left: 6px; +} +%popover-select.type-sort label > * { @extend %sort-button; } + +%popover-select.type-access button::before, +%popover-select.type-source button::before, +%popover-select.type-status button::before { + margin-right: 10px; +} +%popover-select .value-allow button::before, +%popover-select .value-passing button::before { + @extend %with-check-circle-fill-mask, %as-pseudo; + color: $green-500; +} +%popover-select .value-warning button::before { + @extend %with-alert-triangle-mask, %as-pseudo; + color: $orange-500; +} +%popover-select .value-deny button::before, +%popover-select .value-critical button::before { + @extend %with-cancel-square-fill-mask, %as-pseudo; + color: $red-500; +} +%popover-select .value-empty button::before { + @extend %with-minus-square-fill-mask, %as-pseudo; + color: $gray-400; +} +%popover-select.type-source li button { + text-transform: capitalize; +} +%popover-select.type-source li.aws button { + text-transform: 
uppercase; +} +%popover-select .aws button::before { + @extend %with-logo-aws-color-icon, %as-pseudo; +} +%popover-select .kubernetes button::before { + @extend %with-logo-kubernetes-color-icon, %as-pseudo; +} +%popover-select .consul button::before { + @extend %with-logo-consul-color-icon, %as-pseudo; +} +%popover-select .nomad button::before { + @extend %with-logo-nomad-color-icon, %as-pseudo; +} +%popover-select .terraform button::before { + @extend %with-logo-terraform-color-icon, %as-pseudo; +} diff --git a/ui-v2/app/templates/dc/acls/policies/index.hbs b/ui-v2/app/templates/dc/acls/policies/index.hbs index 7909a6f96..b7c7094b6 100644 --- a/ui-v2/app/templates/dc/acls/policies/index.hbs +++ b/ui-v2/app/templates/dc/acls/policies/index.hbs @@ -3,115 +3,100 @@ {{else}} {{title 'Access Controls'}} {{/if}} +{{#let (hash + types=(if type (split type ',') undefined) + dcs=(if dc (split dc ',') undefined) +) as |filters|}} + {{#let (or sortBy "Name:asc") as |sort|}} + + + {{partial 'dc/acls/policies/notifications'}} + + +

+ Access Controls +

+
+ + {{#if isAuthorized }} + {{partial 'dc/acls/nav'}} + {{/if}} + + + {{partial 'dc/acls/disabled'}} + + + {{partial 'dc/acls/authorization'}} + + + Create + + + {{#if (gt items.length 0) }} + - - {{partial 'dc/acls/policies/notifications'}} - - -

- Access Controls -

-
- -{{#if isAuthorized }} - {{partial 'dc/acls/nav'}} -{{/if}} - - - {{partial 'dc/acls/disabled'}} - - - {{partial 'dc/acls/authorization'}} - - - Create - - - {{#if (gt items.length 0) }} - - - - - - {{#let (from-entries (array - (array "Name:asc" "A to Z") - (array "Name:desc" "Z to A") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - -{{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - -{{/let}} - - - - - {{/if}} - - - {{#let (sort-by (comparator 'policy' sort) items) as |sorted|}} - - - + {{/if}} - - - -

- {{#if (gt items.length 0)}} - No policies found - {{else}} - Welcome to Policies - {{/if}} -

+ +{{#let (filter (filter-predicate 'policy' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'policy' sort) filtered) as |sorted|}} + + + - -

- {{#if (gt items.length 0)}} - No policies where found matching that search, or you may not have access to view the policies you are searching for. - {{else}} - There don't seem to be any policies, or you may not have access to view policies yet. - {{/if}} -

+ + + +

+ {{#if (gt items.length 0)}} + No policies found + {{else}} + Welcome to Policies + {{/if}} +

+
+ +

+ {{#if (gt items.length 0)}} + No policies were found matching that search, or you may not have access to view the policies you are searching for. + {{else}} + There don't seem to be any policies, or you may not have access to view policies yet. + {{/if}} +

+
+ + + + +
- - - - -
-
-
+ + {{/let}} +{{/let}} +
+
{{/let}} - - {{/let}} \ No newline at end of file diff --git a/ui-v2/app/templates/dc/acls/roles/index.hbs b/ui-v2/app/templates/dc/acls/roles/index.hbs index 003abf01e..b37322536 100644 --- a/ui-v2/app/templates/dc/acls/roles/index.hbs +++ b/ui-v2/app/templates/dc/acls/roles/index.hbs @@ -35,46 +35,13 @@ {{#if (gt items.length 0) }} - - - - - - {{#let (from-entries (array - (array "Name:asc" "A to Z") - (array "Name:desc" "Z to A") - (array "CreateIndex:desc" "Newest to oldest") - (array "CreateIndex:asc" "Oldest to newest") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - -{{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - -{{/let}} - - - - + + @sort={{sort}} + @onsort={{action (mut sortBy) value="target.selected"}} + /> {{/if}} diff --git a/ui-v2/app/templates/dc/acls/tokens/index.hbs b/ui-v2/app/templates/dc/acls/tokens/index.hbs index cf226665c..d1fad80e1 100644 --- a/ui-v2/app/templates/dc/acls/tokens/index.hbs +++ b/ui-v2/app/templates/dc/acls/tokens/index.hbs @@ -4,113 +4,97 @@ {{title 'Access Controls'}} {{/if}} -{{#let (or sortBy "CreateTime:desc") as |sort|}} - - - {{partial 'dc/acls/tokens/notifications'}} - - -

- Access Controls -

-
- -{{#if isAuthorized }} - {{partial 'dc/acls/nav'}} -{{/if}} - - - {{partial 'dc/acls/disabled'}} - - - {{partial 'dc/acls/authorization'}} - - - Create - - - {{#if (gt items.length 0)}} - - - - - - {{#let (from-entries (array - (array "CreateTime:desc" "Newest to oldest") - (array "CreateTime:asc" "Oldest to newest") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - +{{#let (hash + types=(if type (split type ',') undefined) +) as |filters|}} + {{#let (or sortBy "CreateTime:desc") as |sort|}} + + + {{partial 'dc/acls/tokens/notifications'}} + + +

+ Access Controls +

+
+ + {{#if isAuthorized }} + {{partial 'dc/acls/nav'}} + {{/if}} + + + {{partial 'dc/acls/disabled'}} + + + {{partial 'dc/acls/authorization'}} + + + Create + + + {{#if (gt items.length 0)}} + + {{/if}} + + + {{#if (token/is-legacy items)}} +

Update. We have upgraded our ACL System to allow the creation of reusable policies that can be applied to tokens. Read more about the changes and how to upgrade legacy tokens in our documentation.

+ {{/if}} + {{#let (filter (filter-predicate 'token' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'token' sort) filtered) as |sorted|}} + + + + + + + +

+ {{#if (gt items.length 0)}} + No tokens found + {{else}} + Welcome to ACL Tokens + {{/if}} +

+
+ +

+ {{#if (gt items.length 0)}} + No tokens were found matching that search, or you may not have access to view the tokens you are searching for. + {{else}} + There don't seem to be any tokens, or you may not have access to view tokens yet. + {{/if}} +

+
+
+
+
+ {{/let}} {{/let}} -
-
-
-
- {{/if}} -
- -{{#if (token/is-legacy items)}} -

Update. We have upgraded our ACL System to allow the creation of reusable policies that can be applied to tokens. Read more about the changes and how to upgrade legacy tokens in our documentation.

-{{/if}} - {{#let (sort-by (comparator 'token' sort) items) as |sorted|}} - - - - - - -

- {{#if (gt items.length 0)}} - No tokens found - {{else}} - Welcome to ACL Tokens - {{/if}} -

-
- -

- {{#if (gt items.length 0)}} - No tokens where found matching that search, or you may not have access to view the tokens you are searching for. - {{else}} - There don't seem to be any tokens, or you may not have access to view tokens yet. - {{/if}} -

-
-
-
-
+
{{/let}} -
- {{/let}} diff --git a/ui-v2/app/templates/dc/intentions/index.hbs b/ui-v2/app/templates/dc/intentions/index.hbs index 57a19a583..80047c2dd 100644 --- a/ui-v2/app/templates/dc/intentions/index.hbs +++ b/ui-v2/app/templates/dc/intentions/index.hbs @@ -6,11 +6,15 @@ +{{#let api.data as |items|}} + {{#let (hash + accesses=(if access (split access ',') undefined) + ) as |filters|}} {{#let (or sortBy "Action:asc") as |sort|}}

- Intentions {{format-number api.data.length}} total + Intentions {{format-number items.length}} total

@@ -18,73 +22,34 @@ Create
- {{#if (gt api.data.length 0) }} - - - - - - {{#let (from-entries (array - (array "Action:asc" "Allow to Deny") - (array "Action:desc" "Deny to Allow") - (array "SourceName:asc" "Source: A to Z") - (array "SourceName:desc" "Source: Z to A") - (array "DestinationName:asc" "Destination: A to Z") - (array "DestinationName:desc" "Destination: Z to A") - (array "Precedence:asc" "Precedence: Ascending") - (array "Precedence:desc" "Precedence: Descending") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - - - - - - - - - {{/let}} - - - - - {{/if}} + + @sort={{sort}} + @onsort={{action (mut sortBy) value="target.selected"}} + + @filter={{filters}} + @onfilter={{hash + access=(action (mut access) value="target.selectedItems") + }} + /> + {{/if}} - {{#let (sort-by (comparator 'intention' sort) api.data) as |sorted|}} + {{#let (filter (filter-predicate 'intention' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'intention' sort) filtered) as |sorted|}} - +

- {{#if (gt api.data.length 0)}} + {{#if (gt items.length 0)}} No intentions found {{else}} Welcome to Intentions @@ -93,7 +58,7 @@

- {{#if (gt api.data.length 0)}} + {{#if (gt items.length 0)}} No intentions where found matching that search, or you may not have access to view the intentions you are searching for. {{else}} There don't seem to be any intentions, or you may not have access to view intentions yet. @@ -112,9 +77,12 @@ - {{/let}} + {{/let}} + {{/let}} - {{/let}} + {{/let}} + {{/let}} +{{/let}} \ No newline at end of file diff --git a/ui-v2/app/templates/dc/kv/index.hbs b/ui-v2/app/templates/dc/kv/index.hbs index 013abf9bc..b21cdafd3 100644 --- a/ui-v2/app/templates/dc/kv/index.hbs +++ b/ui-v2/app/templates/dc/kv/index.hbs @@ -23,46 +23,48 @@ {{#if (gt items.length 0) }} - - - - - - {{#let (from-entries (array - (array "Key:asc" "A to Z") - (array "Key:desc" "Z to A") - (array "isFolder:desc" "Folders to Keys") - (array "isFolder:asc" "Keys to Folders") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - {{/let}} - - - - +

+ +
+ + + + {{#let (from-entries (array + (array "Key:asc" "A to Z") + (array "Key:desc" "Z to A") + (array "isFolder:desc" "Folders to Keys") + (array "isFolder:asc" "Keys to Folders") + )) + as |selectable| + }} + {{get selectable sort}} + {{/let}} + + + + {{#let components.Optgroup components.Option as |Optgroup Option|}} + + + + + + + + + {{/let}} + + +
+ {{/if}}
diff --git a/ui-v2/app/templates/dc/nodes/index.hbs b/ui-v2/app/templates/dc/nodes/index.hbs index c5b38372b..558b18205 100644 --- a/ui-v2/app/templates/dc/nodes/index.hbs +++ b/ui-v2/app/templates/dc/nodes/index.hbs @@ -1,5 +1,8 @@ {{title 'Nodes'}} +{{#let (hash + statuses=(if status (split status ',') undefined) +) as |filters|}} {{#let (or sortBy "Node:asc") as |sort|}} @@ -10,53 +13,26 @@ {{#if (gt items.length 0) }} - - - - - - {{#let (from-entries (array - (array "Node:asc" "A to Z") - (array "Node:desc" "Z to A") - (array "Status:asc" "Unhealthy to Healthy") - (array "Status:desc" "Healthy to Unhealthy") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - {{/let}} - - - - + {{/if}} - {{#let (sort-by (comparator 'node' sort) items) as |sorted|}} +{{#let (filter (filter-predicate 'node' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'node' sort) filtered) as |sorted|}} - - + + @@ -68,7 +44,9 @@ - {{/let}} + {{/let}} +{{/let}} + {{/let}} {{/let}} \ No newline at end of file diff --git a/ui-v2/app/templates/dc/nspaces/index.hbs b/ui-v2/app/templates/dc/nspaces/index.hbs index a062ba1bb..034d71331 100644 --- a/ui-v2/app/templates/dc/nspaces/index.hbs +++ b/ui-v2/app/templates/dc/nspaces/index.hbs @@ -15,40 +15,14 @@ {{#if (gt items.length 0)}} - - - - - - {{#let (from-entries (array - (array "Name:asc" "A to Z") - (array "Name:desc" "Z to A") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - -{{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - -{{/let}} - - - - + {{/if}} diff --git a/ui-v2/app/templates/dc/services/index.hbs b/ui-v2/app/templates/dc/services/index.hbs index b9395b31d..3eacce3fc 100644 --- a/ui-v2/app/templates/dc/services/index.hbs +++ b/ui-v2/app/templates/dc/services/index.hbs @@ -1,65 +1,46 @@ {{title 'Services'}} -{{#let (or sortBy "Name:asc") as |sort|}} +{{#let (hash + 
statuses=(if status (split status ',') undefined) + types=(if type (split type ',') undefined) + sources=(if source (split source ',') undefined) +) as |filters|}} + {{#let (or sortBy "Name:asc") as |sort|}} {{partial 'dc/services/notifications'}}

- Services {{format-number services.length}} total + Services {{format-number services.length}} total

{{#if (gt services.length 0) }} - - - - - - {{#let (from-entries (array - (array "Name:asc" "A to Z") - (array "Name:desc" "Z to A") - (array "Status:asc" "Unhealthy to Healthy") - (array "Status:desc" "Healthy to Unhealthy") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - {{/let}} - - - - + + @sort={{sort}} + @onsort={{action (mut sortBy) value="target.selected"}} + + @filter={{filters}} + @onfilter={{hash + status=(action (mut status) value="target.selectedItems") + type=(action (mut type) value="target.selectedItems") + source=(action (mut source) value="target.selectedItems") + }} + /> {{/if}} -{{#let (sort-by (comparator 'service' sort) services) as |sorted|}} +{{#let (filter (filter-predicate 'service' filters) services) as |filtered|}} + {{#let (sort-by (comparator 'service' sort) filtered) as |sorted|}} - - + + @@ -92,7 +73,9 @@ - {{/let}} + {{/let}} +{{/let}}
+ {{/let}} {{/let}} diff --git a/ui-v2/app/templates/dc/services/show/instances.hbs b/ui-v2/app/templates/dc/services/show/instances.hbs index b919a1cd8..369420aa3 100644 --- a/ui-v2/app/templates/dc/services/show/instances.hbs +++ b/ui-v2/app/templates/dc/services/show/instances.hbs @@ -1,15 +1,32 @@
- {{#if (gt items.length 0) }} +{{#let (hash + statuses=(if status (split status ',') undefined) + sources=(if source (split source ',') undefined) +) as |filters|}} + {{#let (or sortBy "Name:asc") as |sort|}} + {{#if (gt items.length 0) }} - - {{/if}} - - - + + @sort={{sort}} + @onsort={{action (mut sortBy) value="target.selected"}} + + @filter={{filters}} + @onfilter={{hash + status=(action (mut status) value="target.selectedItems") + source=(action (mut source) value="target.selectedItems") + }} + /> + {{/if}} +{{#let (filter (filter-predicate 'serviceInstance' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'serviceInstance' sort) filtered) as |sorted|}} + + + @@ -21,5 +38,9 @@ + {{/let}} +{{/let}} + {{/let}} +{{/let}}
diff --git a/ui-v2/app/templates/dc/services/show/intentions/index.hbs b/ui-v2/app/templates/dc/services/show/intentions/index.hbs index 0849ca5b1..40aa59945 100644 --- a/ui-v2/app/templates/dc/services/show/intentions/index.hbs +++ b/ui-v2/app/templates/dc/services/show/intentions/index.hbs @@ -3,87 +3,55 @@
+{{#let api.data as |items|}} + {{#let (hash + accesses=(if access (split access ',') undefined) + ) as |filters|}} {{#let (or sortBy "Action:asc") as |sort|}}
Create - {{#if (gt api.data.length 0) }} - - - - - - {{#let (from-entries (array - (array "Action:asc" "Allow to Deny") - (array "Action:desc" "Deny to Allow") - (array "SourceName:asc" "Source: A to Z") - (array "SourceName:desc" "Source: Z to A") - (array "DestinationName:asc" "Destination: A to Z") - (array "DestinationName:desc" "Destination: Z to A") - (array "Precedence:asc" "Precedence: Ascending") - (array "Precedence:desc" "Precedence: Descending") - )) - as |selectable| - }} - {{get selectable sort}} - {{/let}} - - - - {{#let components.Optgroup components.Option as |Optgroup Option|}} - - - - - - - - - - - - - - - - - {{/let}} - - - - - {{/if}} - {{#let (sort-by (comparator 'intention' sort) api.data) as |sorted|}} +{{#if (gt items.length 0) }} + +{{/if}} +{{#let (filter (filter-predicate 'intention' filters) items) as |filtered|}} + {{#let (sort-by (comparator 'intention' sort) filtered) as |sorted|}} - +

- There are no intentions {{if (gt intentions.length 0) 'found '}} for this service. + There are no intentions {{if (gt items.length 0) 'found '}} for this service.

- {{/let}} + {{/let}} +{{/let}}
- {{/let}} + {{/let}} + {{/let}} +{{/let}}
diff --git a/ui-v2/app/templates/dc/services/show/services.hbs b/ui-v2/app/templates/dc/services/show/services.hbs index 4b973dc2b..857f21efd 100644 --- a/ui-v2/app/templates/dc/services/show/services.hbs +++ b/ui-v2/app/templates/dc/services/show/services.hbs @@ -1,22 +1,50 @@
- {{#if (gt gatewayServices.length 0)}} -

- The following services may receive traffic from external services through this gateway. Learn more about configuring gateways in our - step-by-step guide. -

- - {{else}} - - -

- There are no linked services. -

-
-
- {{/if}} +{{#let (hash + instances=(if instance (split instance ',') undefined) +) as |filters|}} + {{#let (or sortBy "Name:asc") as |sort|}} +{{#if (gt gatewayServices.length 0)}} + + +{{/if}} +

+ The following services may receive traffic from external services through this gateway. Learn more about configuring gateways in our + step-by-step guide. +

+{{#let (filter (filter-predicate 'service' filters) gatewayServices) as |filtered|}} + {{#let (sort-by (comparator 'service' sort) filtered) as |sorted|}} + + + + + + + +

+ There are no linked services. +

+
+
+
+
+ {{/let}} +{{/let}} + {{/let}} +{{/let}}
diff --git a/ui-v2/app/templates/dc/services/show/upstreams.hbs b/ui-v2/app/templates/dc/services/show/upstreams.hbs index d30009761..0a376080c 100644 --- a/ui-v2/app/templates/dc/services/show/upstreams.hbs +++ b/ui-v2/app/templates/dc/services/show/upstreams.hbs @@ -1,18 +1,49 @@
- {{#if (gt gatewayServices.length 0)}} +{{#let (hash + instances=(if instance (split instance ',') undefined) +) as |filters|}} + {{#let (or sortBy "Name:asc") as |sort|}} +{{#if (gt gatewayServices.length 0)}} + + +{{/if}}

Upstreams are services that may receive traffic from this gateway. Learn more about configuring gateways in our documentation.

- - {{else}} - - -

- There are no upstreams. -

-
-
- {{/if}} +{{#let (filter (filter-predicate 'service' filters) gatewayServices) as |filtered|}} + {{#let (sort-by (comparator 'service' sort) filtered) as |sorted|}} + + + + + + + +

+ There are no upstreams. +

+
+
+
+
+ {{/let}} +{{/let}} + {{/let}} +{{/let}}
diff --git a/ui-v2/tests/acceptance/dc/acls/policies/sorting.feature b/ui-v2/tests/acceptance/dc/acls/policies/sorting.feature index a2b369268..7ab40d649 100644 --- a/ui-v2/tests/acceptance/dc/acls/policies/sorting.feature +++ b/ui-v2/tests/acceptance/dc/acls/policies/sorting.feature @@ -1,36 +1,36 @@ -@setupApplicationTest -Feature: dc / acls / policies / sorting - Scenario: Sorting Policies - Given 1 datacenter model with the value "dc-1" - And 4 policy models from yaml - --- - - Name: "system-A" - - Name: "system-D" - - Name: "system-C" - - Name: "system-B" - --- - When I visit the policies page for yaml - --- - dc: dc-1 - --- - Then the url should be /dc-1/acls/policies - Then I see 4 policy models - When I click selected on the sort - When I click options.1.button on the sort - Then I see name on the policies vertically like yaml - --- - - "system-D" - - "system-C" - - "system-B" - - "system-A" - --- - When I click selected on the sort - When I click options.0.button on the sort - Then I see name on the policies vertically like yaml - --- - - "system-A" - - "system-B" - - "system-C" - - "system-D" - --- - +@setupApplicationTest +Feature: dc / acls / policies / sorting + Scenario: Sorting Policies + Given 1 datacenter model with the value "dc-1" + And 4 policy models from yaml + --- + - Name: "system-A" + - Name: "system-D" + - Name: "system-C" + - Name: "system-B" + --- + When I visit the policies page for yaml + --- + dc: dc-1 + --- + Then the url should be /dc-1/acls/policies + Then I see 4 policy models + When I click selected on the sort + When I click options.1.button on the sort + Then I see name on the policies vertically like yaml + --- + - "system-D" + - "system-C" + - "system-B" + - "system-A" + --- + When I click selected on the sort + When I click options.0.button on the sort + Then I see name on the policies vertically like yaml + --- + - "system-A" + - "system-B" + - "system-C" + - "system-D" + --- + diff --git 
a/ui-v2/tests/acceptance/dc/nodes/sorting.feature b/ui-v2/tests/acceptance/dc/nodes/sorting.feature index d23795d92..dc09fec40 100644 --- a/ui-v2/tests/acceptance/dc/nodes/sorting.feature +++ b/ui-v2/tests/acceptance/dc/nodes/sorting.feature @@ -1,73 +1,73 @@ -@setupApplicationTest -Feature: dc / nodes / sorting - Scenario: - Given 1 datacenter model with the value "dc-1" - And 6 node models from yaml - --- - - Node: Node-A - Checks: - - Status: critical - - Node: Node-B - Checks: - - Status: passing - - Node: Node-C - Checks: - - Status: warning - - Node: Node-D - Checks: - - Status: critical - - Node: Node-E - Checks: - - Status: critical - - Node: Node-F - Checks: - - Status: warning - --- - When I visit the nodes page for yaml - --- - dc: dc-1 - --- - When I click selected on the sort - When I click options.3.button on the sort - Then I see name on the nodes vertically like yaml - --- - - Node-B - - Node-C - - Node-F - - Node-A - - Node-D - - Node-E - --- - When I click selected on the sort - When I click options.2.button on the sort - Then I see name on the nodes vertically like yaml - --- - - Node-A - - Node-D - - Node-E - - Node-C - - Node-F - - Node-B - --- - When I click selected on the sort - When I click options.0.button on the sort - Then I see name on the nodes vertically like yaml - --- - - Node-A - - Node-B - - Node-C - - Node-D - - Node-E - - Node-F - --- - When I click selected on the sort - When I click options.1.button on the sort - Then I see name on the nodes vertically like yaml - --- - - Node-F - - Node-E - - Node-D - - Node-C - - Node-B - - Node-A - --- +@setupApplicationTest +Feature: dc / nodes / sorting + Scenario: + Given 1 datacenter model with the value "dc-1" + And 6 node models from yaml + --- + - Node: Node-A + Checks: + - Status: critical + - Node: Node-B + Checks: + - Status: passing + - Node: Node-C + Checks: + - Status: warning + - Node: Node-D + Checks: + - Status: critical + - Node: Node-E + Checks: + - Status: critical + - 
Node: Node-F + Checks: + - Status: warning + --- + When I visit the nodes page for yaml + --- + dc: dc-1 + --- + When I click selected on the sort + When I click options.0.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-A + - Node-D + - Node-E + - Node-C + - Node-F + - Node-B + --- + When I click selected on the sort + When I click options.1.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-B + - Node-C + - Node-F + - Node-A + - Node-D + - Node-E + --- + When I click selected on the sort + When I click options.2.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-A + - Node-B + - Node-C + - Node-D + - Node-E + - Node-F + --- + When I click selected on the sort + When I click options.3.button on the sort + Then I see name on the nodes vertically like yaml + --- + - Node-F + - Node-E + - Node-D + - Node-C + - Node-B + - Node-A + --- diff --git a/ui-v2/tests/acceptance/dc/services/show/upstreams.feature b/ui-v2/tests/acceptance/dc/services/show/upstreams.feature index cc703e4a4..00dc65fc0 100644 --- a/ui-v2/tests/acceptance/dc/services/show/upstreams.feature +++ b/ui-v2/tests/acceptance/dc/services/show/upstreams.feature @@ -28,6 +28,7 @@ Feature: dc / services / show / upstreams --- And the title should be "ingress-gateway-1 - Consul" When I click upstreams on the tabs + And I see upstreamsIsSelected on the tabs Then I see 3 service models on the tabs.upstreamsTab component Scenario: Don't see the Upstreams tab Given 1 datacenter model with the value "dc1" diff --git a/ui-v2/tests/acceptance/dc/services/sorting.feature b/ui-v2/tests/acceptance/dc/services/sorting.feature index 99af3b233..b3826f9fc 100644 --- a/ui-v2/tests/acceptance/dc/services/sorting.feature +++ b/ui-v2/tests/acceptance/dc/services/sorting.feature @@ -40,15 +40,28 @@ Feature: dc / services / sorting dc: dc-1 --- When I click selected on the sort - When I click options.3.button on the sort + # 
unhealthy / healthy + When I click options.0.button on the sort Then I see name on the services vertically like yaml --- + - Service-B + - Service-C + - Service-A + - Service-D - Service-F - Service-E + --- + When I click selected on the sort + # healthy / unhealthy + When I click options.1.button on the sort + Then I see name on the services vertically like yaml + --- + - Service-E + - Service-F - Service-D + - Service-A - Service-C - Service-B - - Service-A --- When I click selected on the sort When I click options.2.button on the sort @@ -62,24 +75,13 @@ Feature: dc / services / sorting - Service-F --- When I click selected on the sort - When I click options.0.button on the sort + When I click options.3.button on the sort Then I see name on the services vertically like yaml --- - - Service-B - - Service-C - - Service-A - - Service-D - Service-F - Service-E - --- - When I click selected on the sort - When I click options.1.button on the sort - Then I see name on the services vertically like yaml - --- - - Service-E - - Service-F - Service-D - - Service-A - Service-C - Service-B + - Service-A --- diff --git a/ui-v2/tests/pages/dc/acls/policies/index.js b/ui-v2/tests/pages/dc/acls/policies/index.js index 40303efff..a17926d73 100644 --- a/ui-v2/tests/pages/dc/acls/policies/index.js +++ b/ui-v2/tests/pages/dc/acls/policies/index.js @@ -2,6 +2,6 @@ export default function(visitable, creatable, policies, popoverSelect) { return creatable({ visit: visitable('/:dc/acls/policies'), policies: policies(), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), }); } diff --git a/ui-v2/tests/pages/dc/acls/roles/index.js b/ui-v2/tests/pages/dc/acls/roles/index.js index 851fcbe19..67279ffc6 100644 --- a/ui-v2/tests/pages/dc/acls/roles/index.js +++ b/ui-v2/tests/pages/dc/acls/roles/index.js @@ -2,7 +2,7 @@ export default function(visitable, creatable, roles, popoverSelect) { return { visit: visitable('/:dc/acls/roles'), roles: roles(), - sort: 
popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), ...creatable(), }; } diff --git a/ui-v2/tests/pages/dc/acls/tokens/index.js b/ui-v2/tests/pages/dc/acls/tokens/index.js index b08510794..0af210d6f 100644 --- a/ui-v2/tests/pages/dc/acls/tokens/index.js +++ b/ui-v2/tests/pages/dc/acls/tokens/index.js @@ -3,7 +3,7 @@ export default function(visitable, creatable, text, tokens, popoverSelect) { visit: visitable('/:dc/acls/tokens'), update: text('[data-test-notification-update]'), tokens: tokens(), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), ...creatable(), }; } diff --git a/ui-v2/tests/pages/dc/intentions/index.js b/ui-v2/tests/pages/dc/intentions/index.js index 2c292bab1..f554d1964 100644 --- a/ui-v2/tests/pages/dc/intentions/index.js +++ b/ui-v2/tests/pages/dc/intentions/index.js @@ -2,7 +2,7 @@ export default function(visitable, creatable, clickable, intentions, popoverSele return creatable({ visit: visitable('/:dc/intentions'), intentions: intentions(), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), create: clickable('[data-test-create]'), }); } diff --git a/ui-v2/tests/pages/dc/nodes/index.js b/ui-v2/tests/pages/dc/nodes/index.js index c26bbd415..81aabe3ff 100644 --- a/ui-v2/tests/pages/dc/nodes/index.js +++ b/ui-v2/tests/pages/dc/nodes/index.js @@ -8,6 +8,6 @@ export default function(visitable, text, clickable, attribute, collection, popov visit: visitable('/:dc/nodes'), nodes: collection('.consul-node-list [data-test-list-row]', node), home: clickable('[data-test-home]'), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), }; } diff --git a/ui-v2/tests/pages/dc/nspaces/index.js b/ui-v2/tests/pages/dc/nspaces/index.js index 96b1a6b7c..4a3e5d0fb 100644 --- a/ui-v2/tests/pages/dc/nspaces/index.js +++ b/ui-v2/tests/pages/dc/nspaces/index.js @@ -2,6 +2,6 @@ export default function(visitable, creatable, nspaces, popoverSelect) { return creatable({ visit: 
visitable('/:dc/namespaces'), nspaces: nspaces(), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), }); } diff --git a/ui-v2/tests/pages/dc/services/index.js b/ui-v2/tests/pages/dc/services/index.js index deac821ad..5ca6123a3 100644 --- a/ui-v2/tests/pages/dc/services/index.js +++ b/ui-v2/tests/pages/dc/services/index.js @@ -10,6 +10,6 @@ export default function(visitable, clickable, text, attribute, present, collecti visit: visitable('/:dc/services'), services: collection('.consul-service-list > ul > li:not(:first-child)', service), home: clickable('[data-test-home]'), - sort: popoverSelect(), + sort: popoverSelect('[data-test-sort-control]'), }; } diff --git a/ui-v2/tests/pages/dc/services/show.js b/ui-v2/tests/pages/dc/services/show.js index 0630edbe9..68cf5ea65 100644 --- a/ui-v2/tests/pages/dc/services/show.js +++ b/ui-v2/tests/pages/dc/services/show.js @@ -23,7 +23,7 @@ export default function(visitable, attribute, collection, text, intentions, filt intentions: intentions(), }; page.tabs.upstreamsTab = { - services: collection('.consul-upstream-list > ul > li:not(:first-child)', { + services: collection('.consul-service-list > ul > li:not(:first-child)', { name: text('[data-test-service-name]'), }), }; diff --git a/ui-v2/tests/unit/filter/predicates/intention-test.js b/ui-v2/tests/unit/filter/predicates/intention-test.js new file mode 100644 index 000000000..9fde7ea33 --- /dev/null +++ b/ui-v2/tests/unit/filter/predicates/intention-test.js @@ -0,0 +1,43 @@ +import factory from 'consul-ui/filter/predicates/intention'; +import { module, test } from 'qunit'; + +module('Unit | Filter | Predicates | intention', function() { + const predicate = factory(); + + test('it returns items depending on Action', function(assert) { + const items = [ + { + Action: 'allow', + }, + { + Action: 'deny', + }, + ]; + + let expected, actual; + + expected = [items[0]]; + actual = items.filter( + predicate({ + accesses: ['allow'], + }) + ); + 
assert.deepEqual(actual, expected); + + expected = [items[1]]; + actual = items.filter( + predicate({ + accesses: ['deny'], + }) + ); + assert.deepEqual(actual, expected); + + expected = items; + actual = items.filter( + predicate({ + accesses: ['allow', 'deny'], + }) + ); + assert.deepEqual(actual, expected); + }); +}); diff --git a/ui-v2/tests/unit/filter/predicates/service-test.js b/ui-v2/tests/unit/filter/predicates/service-test.js new file mode 100644 index 000000000..55d243610 --- /dev/null +++ b/ui-v2/tests/unit/filter/predicates/service-test.js @@ -0,0 +1,171 @@ +import factory from 'consul-ui/filter/predicates/service'; +import { module, test } from 'qunit'; + +module('Unit | Filter | Predicates | service', function() { + const predicate = factory(); + + test('it returns registered/unregistered items depending on instance count', function(assert) { + const items = [ + { + InstanceCount: 1, + }, + { + InstanceCount: 0, + }, + ]; + + let expected, actual; + + expected = [items[0]]; + actual = items.filter( + predicate({ + instances: ['registered'], + }) + ); + assert.deepEqual(actual, expected); + + expected = [items[1]]; + actual = items.filter( + predicate({ + instances: ['not-registered'], + }) + ); + assert.deepEqual(actual, expected); + + expected = items; + actual = items.filter( + predicate({ + instances: ['registered', 'not-registered'], + }) + ); + assert.deepEqual(actual, expected); + }); + + test('it returns items depending on status', function(assert) { + const items = [ + { + MeshStatus: 'passing', + }, + { + MeshStatus: 'warning', + }, + { + MeshStatus: 'critical', + }, + ]; + + let expected, actual; + + expected = [items[0]]; + actual = items.filter( + predicate({ + statuses: ['passing'], + }) + ); + assert.deepEqual(actual, expected); + + expected = [items[1]]; + actual = items.filter( + predicate({ + statuses: ['warning'], + }) + ); + assert.deepEqual(actual, expected); + + expected = items; + actual = items.filter( + predicate({ + statuses: 
['passing', 'warning', 'critical'], + }) + ); + assert.deepEqual(actual, expected); + }); + + test('it returns items depending on service type', function(assert) { + const items = [ + { + Kind: 'ingress-gateway', + }, + { + Kind: 'mesh-gateway', + }, + {}, + ]; + + let expected, actual; + + expected = [items[0]]; + actual = items.filter( + predicate({ + types: ['ingress-gateway'], + }) + ); + assert.deepEqual(actual, expected); + + expected = [items[1]]; + actual = items.filter( + predicate({ + types: ['mesh-gateway'], + }) + ); + assert.deepEqual(actual, expected); + + expected = items; + actual = items.filter( + predicate({ + types: ['ingress-gateway', 'mesh-gateway', 'service'], + }) + ); + assert.deepEqual(actual, expected); + }); + test('it returns items depending on a mixture of properties', function(assert) { + const items = [ + { + Kind: 'ingress-gateway', + MeshStatus: 'passing', + InstanceCount: 1, + }, + { + Kind: 'mesh-gateway', + MeshStatus: 'warning', + InstanceCount: 1, + }, + { + MeshStatus: 'critical', + InstanceCount: 0, + }, + ]; + + let expected, actual; + + expected = [items[0]]; + actual = items.filter( + predicate({ + types: ['ingress-gateway'], + statuses: ['passing'], + instances: ['registered'], + }) + ); + assert.deepEqual(actual, expected); + + expected = [items[1]]; + actual = items.filter( + predicate({ + types: ['mesh-gateway'], + statuses: ['warning'], + instances: ['registered'], + }) + ); + assert.deepEqual(actual, expected); + + expected = items; + actual = items.filter( + predicate({ + types: ['ingress-gateway', 'mesh-gateway', 'service'], + statuses: ['passing', 'warning', 'critical'], + instances: ['registered', 'not-registered'], + }) + ); + assert.deepEqual(actual, expected); + }); +}); From e3fbebda016c9ea0532b337a0d568825c9676b5c Mon Sep 17 00:00:00 2001 From: Hans Hasselberg Date: Wed, 2 Sep 2020 14:15:41 +0200 Subject: [PATCH 18/73] add docs for dual stack options (#8407) --- website/pages/docs/agent/options.mdx | 8 
++++++++ 1 file changed, 8 insertions(+) diff --git a/website/pages/docs/agent/options.mdx b/website/pages/docs/agent/options.mdx index 95180f5d1..35591cb4c 100644 --- a/website/pages/docs/agent/options.mdx +++ b/website/pages/docs/agent/options.mdx @@ -824,8 +824,16 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." - `advertise_addr` Equivalent to the [`-advertise` command-line flag](#_advertise). +- `advertise_addr_ipv4` This was added together with [`advertise_addr_ipv6`](#advertise_addr_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_ipv6` This was added together with [`advertise_addr_ipv4`](#advertise_addr_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + - `advertise_addr_wan` Equivalent to the [`-advertise-wan` command-line flag](#_advertise-wan). +- `advertise_addr_wan_ipv4` This was added together with [`advertise_addr_wan_ipv6`](#advertise_addr_wan_ipv6) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + +- `advertise_addr_wan_ipv6` This was added together with [`advertise_addr_wan_ipv4`](#advertise_addr_wan_ipv4) to support dual stack IPv4/IPv6 environments. Using this, both IPv4 and IPv6 addresses can be specified and requested during eg service discovery. + - `serf_lan` ((#serf_lan_bind)) Equivalent to the [`-serf-lan-bind` command-line flag](#_serf_lan_bind). - `serf_lan_allowed_cidrs` ((#serf_lan_allowed_cidrs)) Equivalent to the [`-serf-lan-allowed-cidrs` command-line flag](#_serf_lan_allowed_cidrs). 
From 6ae3e3ffc7c0db342000ff9077535dc599481491 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 2 Sep 2020 14:45:06 +0100 Subject: [PATCH 19/73] ui: Delete unused javascript/CSS components (#8597) --- .../app/components/grid-collection/index.hbs | 7 -- ui-v2/app/components/grid-collection/index.js | 83 ------------------- .../app/components/healthcheck-info/index.hbs | 9 -- .../app/components/healthcheck-info/index.js | 4 - .../components/healthcheck-status/index.hbs | 3 - .../components/healthcheck-status/index.js | 12 --- .../healthchecked-resource/index.hbs | 38 --------- .../healthchecked-resource/index.js | 29 ------- ui-v2/app/styles/base/components/index.scss | 1 - .../base/components/stats-card/index.scss | 2 - .../base/components/stats-card/layout.scss | 53 ------------ .../base/components/stats-card/skin.scss | 34 -------- ui-v2/app/styles/components.scss | 3 - .../styles/components/grid-collection.scss | 42 ---------- .../styles/components/healthcheck-info.scss | 12 --- .../components/healthcheck-info/index.scss | 2 - .../components/healthcheck-info/layout.scss | 17 ---- .../components/healthcheck-info/skin.scss | 30 ------- .../components/healthchecked-resource.scss | 53 ------------ .../styles/components/list-collection.scss | 2 + .../components/healthcheck-info-test.js | 24 ------ .../components/healthcheck-status-test.js | 22 ----- .../components/healthchecked-resource-test.js | 32 ------- 23 files changed, 2 insertions(+), 512 deletions(-) delete mode 100644 ui-v2/app/components/grid-collection/index.hbs delete mode 100644 ui-v2/app/components/grid-collection/index.js delete mode 100644 ui-v2/app/components/healthcheck-info/index.hbs delete mode 100644 ui-v2/app/components/healthcheck-info/index.js delete mode 100644 ui-v2/app/components/healthcheck-status/index.hbs delete mode 100644 ui-v2/app/components/healthcheck-status/index.js delete mode 100644 ui-v2/app/components/healthchecked-resource/index.hbs delete mode 100644 
ui-v2/app/components/healthchecked-resource/index.js delete mode 100644 ui-v2/app/styles/base/components/stats-card/index.scss delete mode 100644 ui-v2/app/styles/base/components/stats-card/layout.scss delete mode 100644 ui-v2/app/styles/base/components/stats-card/skin.scss delete mode 100644 ui-v2/app/styles/components/grid-collection.scss delete mode 100644 ui-v2/app/styles/components/healthcheck-info.scss delete mode 100644 ui-v2/app/styles/components/healthcheck-info/index.scss delete mode 100644 ui-v2/app/styles/components/healthcheck-info/layout.scss delete mode 100644 ui-v2/app/styles/components/healthcheck-info/skin.scss delete mode 100644 ui-v2/app/styles/components/healthchecked-resource.scss delete mode 100644 ui-v2/tests/integration/components/healthcheck-info-test.js delete mode 100644 ui-v2/tests/integration/components/healthcheck-status-test.js delete mode 100644 ui-v2/tests/integration/components/healthchecked-resource-test.js diff --git a/ui-v2/app/components/grid-collection/index.hbs b/ui-v2/app/components/grid-collection/index.hbs deleted file mode 100644 index a7835ba1b..000000000 --- a/ui-v2/app/components/grid-collection/index.hbs +++ /dev/null @@ -1,7 +0,0 @@ -{{on-window 'resize' (action "resize") }} - -
  • - {{~#each _cells as |cell|~}} -
  • {{yield cell.item cell.index }}
  • - {{~/each~}} -
    \ No newline at end of file diff --git a/ui-v2/app/components/grid-collection/index.js b/ui-v2/app/components/grid-collection/index.js deleted file mode 100644 index 00970de1e..000000000 --- a/ui-v2/app/components/grid-collection/index.js +++ /dev/null @@ -1,83 +0,0 @@ -import { inject as service } from '@ember/service'; -import { computed, get, set } from '@ember/object'; -import Component from 'ember-collection/components/ember-collection'; -import PercentageColumns from 'ember-collection/layouts/percentage-columns'; -import style from 'ember-computed-style'; - -export default Component.extend({ - dom: service('dom'), - tagName: 'div', - attributeBindings: ['style'], - height: 500, - cellHeight: 113, - style: style('getStyle'), - classNames: ['grid-collection'], - init: function() { - this._super(...arguments); - this.columns = [25, 25, 25, 25]; - }, - didInsertElement: function() { - this._super(...arguments); - this.actions.resize.apply(this, [{ target: this.dom.viewport() }]); - }, - didReceiveAttrs: function() { - this._super(...arguments); - this._cellLayout = this['cell-layout'] = new PercentageColumns( - get(this, 'items.length'), - get(this, 'columns'), - get(this, 'cellHeight') - ); - }, - getStyle: computed('height', function() { - return { - height: get(this, 'height'), - }; - }), - actions: { - resize: function(e) { - // TODO: This top part is very similar to resize in tabular-collection - // see if it make sense to DRY out - const dom = get(this, 'dom'); - const $appContent = dom.element('main > div'); - if ($appContent) { - const rect = this.element.getBoundingClientRect(); - const $footer = dom.element('footer[role="contentinfo"]'); - const space = rect.top + $footer.clientHeight; - const height = e.target.innerHeight - space; - this.set('height', Math.max(0, height)); - this.updateItems(); - this.updateScrollPosition(); - } - const width = e.target.innerWidth; - const len = get(this, 'columns.length'); - switch (true) { - case width > 1013: - 
if (len != 4) { - set(this, 'columns', [25, 25, 25, 25]); - } - break; - case width > 744: - if (len != 3) { - set(this, 'columns', [33, 33, 34]); - } - break; - case width > 487: - if (len != 2) { - set(this, 'columns', [50, 50]); - } - break; - case width < 488: - if (len != 1) { - set(this, 'columns', [100]); - } - } - if (len !== get(this, 'columns.length')) { - this._cellLayout = this['cell-layout'] = new PercentageColumns( - get(this, 'items.length'), - get(this, 'columns'), - get(this, 'cellHeight') - ); - } - }, - }, -}); diff --git a/ui-v2/app/components/healthcheck-info/index.hbs b/ui-v2/app/components/healthcheck-info/index.hbs deleted file mode 100644 index 5661472fc..000000000 --- a/ui-v2/app/components/healthcheck-info/index.hbs +++ /dev/null @@ -1,9 +0,0 @@ -{{#if (and (lt passing 1) (lt warning 1) (lt critical 1) )}} - 0 -{{else}} -
    - - - -
    -{{/if}} diff --git a/ui-v2/app/components/healthcheck-info/index.js b/ui-v2/app/components/healthcheck-info/index.js deleted file mode 100644 index abe1ccedb..000000000 --- a/ui-v2/app/components/healthcheck-info/index.js +++ /dev/null @@ -1,4 +0,0 @@ -import Component from '@ember/component'; -export default Component.extend({ - tagName: '', -}); diff --git a/ui-v2/app/components/healthcheck-status/index.hbs b/ui-v2/app/components/healthcheck-status/index.hbs deleted file mode 100644 index 383f67386..000000000 --- a/ui-v2/app/components/healthcheck-status/index.hbs +++ /dev/null @@ -1,3 +0,0 @@ -{{!-- we use concat here to avoid ember adding returns between words, which causes a layout issue--}} -
    {{ concat 'Healthchecks ' (capitalize name) }}
    -
    {{format-number count}}
    \ No newline at end of file diff --git a/ui-v2/app/components/healthcheck-status/index.js b/ui-v2/app/components/healthcheck-status/index.js deleted file mode 100644 index e2485aee2..000000000 --- a/ui-v2/app/components/healthcheck-status/index.js +++ /dev/null @@ -1,12 +0,0 @@ -import Component from '@ember/component'; -import { computed } from '@ember/object'; -export default Component.extend({ - tagName: '', - count: computed('value', function() { - const value = this.value; - if (Array.isArray(value)) { - return value.length; - } - return value; - }), -}); diff --git a/ui-v2/app/components/healthchecked-resource/index.hbs b/ui-v2/app/components/healthchecked-resource/index.hbs deleted file mode 100644 index 19d7f4909..000000000 --- a/ui-v2/app/components/healthchecked-resource/index.hbs +++ /dev/null @@ -1,38 +0,0 @@ - - {{yield}} - - {{#if (eq checks.length 0)}} - {{checks.length}} - {{else if (eq checks.length healthy.length)}} - {{healthy.length}} - {{/if}} - - - - {{name}} - {{address}} - - - - {{#if (not-eq checks.length healthy.length)}} - - {{/if}} - - diff --git a/ui-v2/app/components/healthchecked-resource/index.js b/ui-v2/app/components/healthchecked-resource/index.js deleted file mode 100644 index 73fe18715..000000000 --- a/ui-v2/app/components/healthchecked-resource/index.js +++ /dev/null @@ -1,29 +0,0 @@ -import { filter } from '@ember/object/computed'; -import Component from '@ember/component'; -import { computed, get } from '@ember/object'; -import style from 'ember-computed-style'; -export default Component.extend({ - classNames: ['healthchecked-resource'], - attributeBindings: ['style'], - style: style('gridRowEnd'), - unhealthy: filter(`checks.@each.Status`, function(item) { - const status = get(item, 'Status'); - return status === 'critical' || status === 'warning'; - }), - healthy: filter(`checks.@each.Status`, function(item) { - const status = get(item, 'Status'); - return status === 'passing'; - }), - gridRowEnd: 
computed('UnhealthyChecks', function() { - let spans = 3; - if (get(this, 'service')) { - spans++; - } - if (get(this, 'healthy.length') > 0) { - spans++; - } - return { - gridRow: `auto / span ${spans + (get(this, 'unhealthy.length') || 0)}`, - }; - }), -}); diff --git a/ui-v2/app/styles/base/components/index.scss b/ui-v2/app/styles/base/components/index.scss index e4017d5ee..da964526c 100644 --- a/ui-v2/app/styles/base/components/index.scss +++ b/ui-v2/app/styles/base/components/index.scss @@ -13,7 +13,6 @@ @import './popover-menu/index'; @import './radio-group/index'; @import './sliding-toggle/index'; -@import './stats-card/index'; @import './table/index'; @import './tabs/index'; @import './toggle-button/index'; diff --git a/ui-v2/app/styles/base/components/stats-card/index.scss b/ui-v2/app/styles/base/components/stats-card/index.scss deleted file mode 100644 index bc1825219..000000000 --- a/ui-v2/app/styles/base/components/stats-card/index.scss +++ /dev/null @@ -1,2 +0,0 @@ -@import './skin'; -@import './layout'; diff --git a/ui-v2/app/styles/base/components/stats-card/layout.scss b/ui-v2/app/styles/base/components/stats-card/layout.scss deleted file mode 100644 index 7068b26e7..000000000 --- a/ui-v2/app/styles/base/components/stats-card/layout.scss +++ /dev/null @@ -1,53 +0,0 @@ -%stats-card { - position: relative; -} -%stats-card header a, -%stats-card header a > * { - display: block; -} -%stats-card header a > *, -%stats-card li a > :last-child { - /* TODO: %truncate */ - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; -} -%stats-card header a { - padding: 12px 15px; -} -%stats-card header > :not(a) { - @extend %stats-card-icon; -} -%stats-card-icon { - display: inline-flex; - align-items: center; -} -%stats-card-icon:last-child { - position: absolute; - background-size: 16px; - background-position: 5px 5px; - font-size: 1.5em; - width: 28px; - height: 28px; - top: calc(-28px / 2); - left: 15px; - font-size: 0; -} 
-%stats-card-icon:first-child { - float: right; - padding-left: 30px; - height: 16px; - margin-top: 15px; - margin-right: 15px; -} - -%stats-card li { - height: 33px; -} -%stats-card li a { - display: flex; - vertical-align: text-top; - align-items: center; - padding: 0 15px 0 12px; - height: 100%; -} diff --git a/ui-v2/app/styles/base/components/stats-card/skin.scss b/ui-v2/app/styles/base/components/stats-card/skin.scss deleted file mode 100644 index 14f552fdb..000000000 --- a/ui-v2/app/styles/base/components/stats-card/skin.scss +++ /dev/null @@ -1,34 +0,0 @@ -%stats-card { - border: $decor-border-100; - border-radius: $decor-radius-100; -} -%stats-card li { - border-top: $decor-border-100; -} -%stats-card, -%stats-card li { - border-color: $gray-200; -} -%stats-card a { - color: $gray-900; -} -%stats-card, -%stats-card header::before { - box-shadow: $decor-elevation-300; -} -%stats-card:hover, -%stats-card:focus { - box-shadow: $decor-elevation-400; -} -%stats-card header > :not(a):last-child { - border: $decor-border-100; - border-radius: 100%; - border-color: $gray-200; - background-color: $white; -} -%stats-card ul { - /*TODO: %list-style-none?*/ - list-style-type: none; - margin: 0; - padding: 0; -} diff --git a/ui-v2/app/styles/components.scss b/ui-v2/app/styles/components.scss index 77337360c..20100e9e7 100644 --- a/ui-v2/app/styles/components.scss +++ b/ui-v2/app/styles/components.scss @@ -16,8 +16,6 @@ @import './components/tooltip'; @import './components/tag-list'; @import './components/healthcheck-output'; -@import './components/healthcheck-info'; -@import './components/healthchecked-resource'; @import './components/freetext-filter'; @import './components/filter-bar'; @import './components/tomography-graph'; @@ -34,7 +32,6 @@ @import './components/tabular-details'; @import './components/tabular-collection'; @import './components/list-collection'; -@import './components/grid-collection'; @import './components/popover-select'; @import 
'./components/tooltip-panel'; @import './components/menu-panel'; diff --git a/ui-v2/app/styles/components/grid-collection.scss b/ui-v2/app/styles/components/grid-collection.scss deleted file mode 100644 index 108c3820a..000000000 --- a/ui-v2/app/styles/components/grid-collection.scss +++ /dev/null @@ -1,42 +0,0 @@ -.unhealthy > div, -.healthy > div { - @extend %card-grid; -} -.grid-collection { - height: 500px; - position: relative; -} -.healthy > div { - width: calc(100% + 23px); - min-height: 500px; -} -.unhealthy > div { - margin-bottom: 20px; -} -.healthy > div > ul > li { - padding-right: 23px; - padding-bottom: 20px; -} -%card-grid > ul, -%card-grid > ol { - list-style-type: none; - display: grid; - grid-auto-rows: 12px; -} -%card-grid li.empty { - grid-column: 1 / -1; -} -@media #{$--fixed-grid} { - %card-grid > ul, - %card-grid > ol { - grid-gap: 20px 20px; - grid-template-columns: repeat(4, minmax(220px, 1fr)); - } -} -@media #{$--lt-fixed-grid} { - %card-grid > ul, - %card-grid > ol { - grid-template-columns: repeat(auto-fill, minmax(220px, 1fr)); - grid-gap: 20px 2%; - } -} diff --git a/ui-v2/app/styles/components/healthcheck-info.scss b/ui-v2/app/styles/components/healthcheck-info.scss deleted file mode 100644 index 5207349bb..000000000 --- a/ui-v2/app/styles/components/healthcheck-info.scss +++ /dev/null @@ -1,12 +0,0 @@ -@import './healthcheck-info/index'; -%table tr .healthcheck-info { - @extend %healthcheck-info; -} -// TODO: Look at why we can't have the zeros in the healthcheck-info -%table td span.zero { - @extend %with-minus-square-fill-color-icon; - background-position: left center; - display: block; - text-indent: 20px; - color: $gray-400; -} diff --git a/ui-v2/app/styles/components/healthcheck-info/index.scss b/ui-v2/app/styles/components/healthcheck-info/index.scss deleted file mode 100644 index bc1825219..000000000 --- a/ui-v2/app/styles/components/healthcheck-info/index.scss +++ /dev/null @@ -1,2 +0,0 @@ -@import './skin'; -@import 
'./layout'; diff --git a/ui-v2/app/styles/components/healthcheck-info/layout.scss b/ui-v2/app/styles/components/healthcheck-info/layout.scss deleted file mode 100644 index a9ec45a42..000000000 --- a/ui-v2/app/styles/components/healthcheck-info/layout.scss +++ /dev/null @@ -1,17 +0,0 @@ -%healthcheck-info { - display: inline-flex; -} -%healthcheck-info > * { - display: block; -} -%healthcheck-info dt.zero { - display: none; -} -%healthcheck-info dd.zero { - visibility: hidden; -} -%healthcheck-info dd { - box-sizing: content-box; - margin-left: 22px; - padding-right: 10px; -} diff --git a/ui-v2/app/styles/components/healthcheck-info/skin.scss b/ui-v2/app/styles/components/healthcheck-info/skin.scss deleted file mode 100644 index 5f268ab54..000000000 --- a/ui-v2/app/styles/components/healthcheck-info/skin.scss +++ /dev/null @@ -1,30 +0,0 @@ -%healthcheck-info dt { - font-size: 0; -} -%healthcheck-info dt::before { - @extend %as-pseudo; - position: absolute; - width: 18px; - height: 18px; -} -%healthcheck-info dt.passing::before { - @extend %with-check-circle-fill-color-icon; -} -%healthcheck-info dt.warning::before { - @extend %with-alert-triangle-color-icon; -} -%healthcheck-info dt.critical::before { - @extend %with-cancel-square-fill-color-icon; -} -%healthcheck-info dt.passing, -%healthcheck-info dt.passing + dd { - color: $color-success; -} -%healthcheck-info dt.warning, -%healthcheck-info dt.warning + dd { - color: $color-alert; -} -%healthcheck-info dt.critical, -%healthcheck-info dt.critical + dd { - color: $color-failure; -} diff --git a/ui-v2/app/styles/components/healthchecked-resource.scss b/ui-v2/app/styles/components/healthchecked-resource.scss deleted file mode 100644 index b9037c1c3..000000000 --- a/ui-v2/app/styles/components/healthchecked-resource.scss +++ /dev/null @@ -1,53 +0,0 @@ -.healthchecked-resource > div { - @extend %stats-card; -} -%stats-card-icon { - @extend %tooltip-below; -} -%stats-card-icon:first-child::before { - @extend 
%tooltip-left; -} -%stats-card-icon:last-child::before { - @extend %tooltip-right; -} - -%stats-card-icon:last-child { - /* TODO: In order to get rid of our colored star */ - /* this needs to use a %mask, and we are already using */ - /* our before/after psuedo elements for the tooltip */ - /* so this will need reworking slighly before we can */ - /* get rid of our hardcoded magenta star icon */ - @extend %with-star-icon; -} -%stats-card header > .zero { - @extend %with-minus-square-fill-color-icon; - color: $gray-400; -} -%stats-card header > .non-zero { - @extend %with-check-circle-fill-color-icon; - color: $green-500; -} - -%stats-card li a > :first-child { - font-size: 0; - height: 16px; - min-width: 16px; -} -[data-tooltip] { - @extend %with-pseudo-tooltip; -} -%stats-card li a > :last-child { - margin-left: 10px; -} -%stats-card a > :first-child::before { - left: -10px; -} -%stats-card a.passing > :first-child { - @extend %with-check-circle-fill-color-icon; -} -%stats-card a.warning > :first-child { - @extend %with-alert-triangle-color-icon; -} -%stats-card a.critical > :first-child { - @extend %with-cancel-square-fill-color-icon; -} diff --git a/ui-v2/app/styles/components/list-collection.scss b/ui-v2/app/styles/components/list-collection.scss index 78505f903..0e39f1570 100644 --- a/ui-v2/app/styles/components/list-collection.scss +++ b/ui-v2/app/styles/components/list-collection.scss @@ -1,5 +1,7 @@ .list-collection { @extend %list-collection; +} +%list-collection { height: 500px; position: relative; } diff --git a/ui-v2/tests/integration/components/healthcheck-info-test.js b/ui-v2/tests/integration/components/healthcheck-info-test.js deleted file mode 100644 index a4636bd51..000000000 --- a/ui-v2/tests/integration/components/healthcheck-info-test.js +++ /dev/null @@ -1,24 +0,0 @@ -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render } from '@ember/test-helpers'; -import hbs from 
'htmlbars-inline-precompile'; - -module('Integration | Component | healthcheck info', function(hooks) { - setupRenderingTest(hooks); - - test('it renders', async function(assert) { - // Set any properties with this.set('myProperty', 'value'); - // Handle any actions with this.on('myAction', function(val) { ... }); - - await render(hbs`{{healthcheck-info}}`); - - assert.dom('dl').exists({ count: 1 }); - - // Template block usage: - await render(hbs` - {{#healthcheck-info}} - {{/healthcheck-info}} - `); - assert.dom('dl').exists({ count: 1 }); - }); -}); diff --git a/ui-v2/tests/integration/components/healthcheck-status-test.js b/ui-v2/tests/integration/components/healthcheck-status-test.js deleted file mode 100644 index 81cdf8117..000000000 --- a/ui-v2/tests/integration/components/healthcheck-status-test.js +++ /dev/null @@ -1,22 +0,0 @@ -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render } from '@ember/test-helpers'; -import hbs from 'htmlbars-inline-precompile'; - -module('Integration | Component | healthcheck status', function(hooks) { - setupRenderingTest(hooks); - - test('it renders', async function(assert) { - // Set any properties with this.set('myProperty', 'value'); - // Handle any actions with this.on('myAction', function(val) { ... 
}); - - await render(hbs`{{healthcheck-status}}`); - assert.dom('dt').exists({ count: 1 }); - - // Template block usage: - await render(hbs` - {{#healthcheck-status}}{{/healthcheck-status}} - `); - assert.dom('dt').exists({ count: 1 }); - }); -}); diff --git a/ui-v2/tests/integration/components/healthchecked-resource-test.js b/ui-v2/tests/integration/components/healthchecked-resource-test.js deleted file mode 100644 index 37bc8716b..000000000 --- a/ui-v2/tests/integration/components/healthchecked-resource-test.js +++ /dev/null @@ -1,32 +0,0 @@ -import { module, skip } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { find } from '@ember/test-helpers'; -import hbs from 'htmlbars-inline-precompile'; - -module('Integration | Component | healthchecked resource', function(hooks) { - setupRenderingTest(hooks); - - skip('it renders', function(assert) { - // Set any properties with this.set('myProperty', 'value'); - // Handle any actions with this.on('myAction', function(val) { ... 
}); - - this.render(hbs`{{healthchecked-resource}}`); - - assert.ok( - find('*') - .textContent.trim() - .indexOf('other passing checks') !== -1 - ); - - // Template block usage: - this.render(hbs` - {{#healthchecked-resource}}{{/healthchecked-resource}} - `); - - assert.ok( - find('*') - .textContent.trim() - .indexOf('other passing checks') !== -1 - ); - }); -}); From 736a3c89e715fbf174a2aad380a35aa764c46b81 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 2 Sep 2020 14:47:04 +0100 Subject: [PATCH 20/73] ui: Reinstate tooltip for exposed paths pill (#8598) --- ui-v2/app/styles/components/healthcheck-output.scss | 5 ++++- ui-v2/app/styles/components/healthcheck-output/skin.scss | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ui-v2/app/styles/components/healthcheck-output.scss b/ui-v2/app/styles/components/healthcheck-output.scss index d9e19fdeb..117c43288 100644 --- a/ui-v2/app/styles/components/healthcheck-output.scss +++ b/ui-v2/app/styles/components/healthcheck-output.scss @@ -2,7 +2,10 @@ .healthcheck-output { @extend %healthcheck-output; } -%healthcheck-output em::before { +%healthcheck-output dd em[data-tooltip] { + @extend %with-pseudo-tooltip; +} +%healthcheck-output dd em::before { width: 250px; /* TODO: All tooltips previously used */ /* nowrap, they shouldn't */ diff --git a/ui-v2/app/styles/components/healthcheck-output/skin.scss b/ui-v2/app/styles/components/healthcheck-output/skin.scss index 66faff029..87ffad830 100644 --- a/ui-v2/app/styles/components/healthcheck-output/skin.scss +++ b/ui-v2/app/styles/components/healthcheck-output/skin.scss @@ -16,6 +16,7 @@ } %healthcheck-output dd em { @extend %pill; + background-color: $gray-100; /*TODO: Should this be merged into %pill? 
*/ cursor: default; font-style: normal; From 3af96930eb8bd7a294a190c95e11c0203c68b171 Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:16 -0500 Subject: [PATCH 21/73] Add new usage memdb table that tracks usage counts of various elements We update the usage table on Commit() by using the TrackedChanges() API of memdb. Track memdb changes on restore so that usage data can be compiled --- agent/consul/fsm/snapshot_oss_test.go | 6 + agent/consul/state/memdb.go | 30 +++-- agent/consul/state/usage.go | 170 ++++++++++++++++++++++++++ agent/consul/state/usage_oss.go | 33 +++++ agent/consul/state/usage_oss_test.go | 25 ++++ agent/consul/state/usage_test.go | 133 ++++++++++++++++++++ 6 files changed, 387 insertions(+), 10 deletions(-) create mode 100644 agent/consul/state/usage.go create mode 100644 agent/consul/state/usage_oss.go create mode 100644 agent/consul/state/usage_oss_test.go create mode 100644 agent/consul/state/usage_test.go diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index e845c41c9..f798a0efa 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -654,6 +654,12 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, err) require.Equal(t, fedState2, fedStateLoaded2) + // Verify usage data is correctly updated + idx, nodeCount, err := fsm2.state.NodeCount() + require.NoError(t, err) + require.Equal(t, len(nodes), nodeCount) + require.NotZero(t, idx) + // Snapshot snap, err = fsm2.Snapshot() require.NoError(t, err) diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index be4f4348e..5cdfd19dd 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -89,17 +89,20 @@ func (c *changeTrackerDB) publish(changes Changes) error { return nil } -// WriteTxnRestore returns a wrapped RW transaction that does NOT have change -// tracking enabled. 
This should only be used in Restore where we need to -// replace the entire contents of the Store without a need to track the changes. -// WriteTxnRestore uses a zero index since the whole restore doesn't really occur -// at one index - the effect is to write many values that were previously +// WriteTxnRestore returns a wrapped RW transaction that should only be used in +// Restore where we need to replace the entire contents of the Store. +// WriteTxnRestore uses a zero index since the whole restore doesn't really +// occur at one index - the effect is to write many values that were previously // written across many indexes. func (c *changeTrackerDB) WriteTxnRestore() *txn { - return &txn{ + t := &txn{ Txn: c.db.Txn(true), Index: 0, } + + // We enable change tracking so that usage data is correctly populated. + t.Txn.TrackChanges() + return t } // txn wraps a memdb.Txn to capture changes and send them to the EventPublisher. @@ -125,14 +128,21 @@ type txn struct { // by the caller. A non-nil error indicates that a commit failed and was not // applied. func (tx *txn) Commit() error { + changes := Changes{ + Index: tx.Index, + Changes: tx.Txn.Changes(), + } + + if len(changes.Changes) > 0 { + if err := updateUsage(tx, changes); err != nil { + return err + } + } + // publish may be nil if this is a read-only or WriteTxnRestore transaction. // In those cases changes should also be empty, and there will be nothing // to publish. if tx.publish != nil { - changes := Changes{ - Index: tx.Index, - Changes: tx.Txn.Changes(), - } if err := tx.publish(changes); err != nil { return err } diff --git a/agent/consul/state/usage.go b/agent/consul/state/usage.go new file mode 100644 index 000000000..397e157f3 --- /dev/null +++ b/agent/consul/state/usage.go @@ -0,0 +1,170 @@ +package state + +import ( + "fmt" + + memdb "github.com/hashicorp/go-memdb" +) + +// usageTableSchema returns a new table schema used for tracking various indexes +// for the Raft log. 
+func usageTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "usage", + Indexes: map[string]*memdb.IndexSchema{ + "id": { + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + Lowercase: true, + }, + }, + }, + } +} + +func init() { + registerSchema(usageTableSchema) +} + +type UsageEntry struct { + ID string + Index uint64 + Count int +} + +// updateUsage takes a set of memdb changes and computes a delta for specific +// usage metrics that we track. +func updateUsage(tx *txn, changes Changes) error { + usageDeltas := make(map[string]int) + for _, change := range changes.Changes { + var delta int + if change.Created() { + delta = 1 + } else if change.Deleted() { + delta = -1 + } + switch change.Table { + case "nodes": + usageDeltas[change.Table] += delta + case "services": + usageDeltas[change.Table] += delta + } + + addEnterpriseUsage(usageDeltas, change) + } + + idx := changes.Index + // This will happen when restoring from a snapshot, just take the max index + // of the tables we are tracking. 
+ if idx == 0 { + idx = maxIndexTxn(tx, "nodes", "services") + } + + for id, delta := range usageDeltas { + u, err := tx.First("usage", "id", id) + if err != nil { + return fmt.Errorf("failed to retrieve existing usage entry: %s", err) + } + + if u == nil { + if delta < 0 { + return fmt.Errorf("failed to insert usage entry for %q: delta will cause a negative count", id) + } + err := tx.Insert("usage", &UsageEntry{ + ID: id, + Count: delta, + Index: idx, + }) + if err != nil { + return fmt.Errorf("failed to update usage entry: %s", err) + } + } else if cur, ok := u.(*UsageEntry); ok { + if cur.Count+delta < 0 { + return fmt.Errorf("failed to insert usage entry for %q: delta will cause a negative count", id) + } + err := tx.Insert("usage", &UsageEntry{ + ID: id, + Count: cur.Count + delta, + Index: idx, + }) + if err != nil { + return fmt.Errorf("failed to update usage entry: %s", err) + } + } + } + return nil +} + +// ServiceUsage contains all of the usage data related to services +type ServiceUsage struct { + Services int + ServiceInstances int + EnterpriseServiceUsage +} + +// NodeCount returns the latest seen Raft index, a count of the number of nodes +// registered, and any errors. +func (s *Store) NodeCount() (uint64, int, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + usage, err := tx.First("usage", "id", "nodes") + if err != nil { + return 0, 0, fmt.Errorf("failed nodes lookup: %s", err) + } + + // If no nodes have been registered, the usage entry will not exist. + if usage == nil { + return 0, 0, nil + } + + nodeUsage, ok := usage.(*UsageEntry) + if !ok { + return 0, 0, fmt.Errorf("failed nodes lookup: type %T is not *UsageEntry", usage) + } + + return nodeUsage.Index, nodeUsage.Count, nil +} + +// ServiceUsage returns the latest seen Raft index, a compiled set of service +// usage data, and any errors. 
+func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + usage, err := firstUsageEntry(tx, "services") + if err != nil { + return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) + } + + results, err := s.compileServiceUsage(tx, usage.Count) + if err != nil { + return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) + } + + return usage.Index, results, nil +} + +func firstUsageEntry(tx *txn, id string) (*UsageEntry, error) { + usage, err := tx.First("usage", "id", id) + if err != nil { + return nil, err + } + + // If no elements have been inserted, the usage entry will not exist. We + // return a valid value so that can be certain the return value is not nil + // when no error has occurred. + if usage == nil { + return &UsageEntry{ID: id, Count: 0}, nil + } + + realUsage, ok := usage.(*UsageEntry) + if !ok { + return nil, fmt.Errorf("failed usage lookup: type %T is not *UsageEntry", usage) + } + + return realUsage, nil +} diff --git a/agent/consul/state/usage_oss.go b/agent/consul/state/usage_oss.go new file mode 100644 index 000000000..ec54313d5 --- /dev/null +++ b/agent/consul/state/usage_oss.go @@ -0,0 +1,33 @@ +// +build !consulent + +package state + +import ( + "fmt" + + memdb "github.com/hashicorp/go-memdb" +) + +type EnterpriseServiceUsage struct{} + +func addEnterpriseUsage(map[string]int, memdb.Change) {} + +func (s *Store) compileServiceUsage(tx *txn, totalInstances int) (ServiceUsage, error) { + var totalServices int + results, err := tx.Get( + "index", + "id_prefix", + serviceIndexName("", nil), + ) + if err != nil { + return ServiceUsage{}, fmt.Errorf("failed services index lookup: %s", err) + } + for i := results.Next(); i != nil; i = results.Next() { + totalServices += 1 + } + + return ServiceUsage{ + Services: totalServices, + ServiceInstances: totalInstances, + }, nil +} diff --git a/agent/consul/state/usage_oss_test.go b/agent/consul/state/usage_oss_test.go new 
file mode 100644 index 000000000..b441c7163 --- /dev/null +++ b/agent/consul/state/usage_oss_test.go @@ -0,0 +1,25 @@ +// +build !consulent + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStateStore_Usage_ServiceUsage(t *testing.T) { + s := testStateStore(t) + + testRegisterNode(t, s, 0, "node1") + testRegisterNode(t, s, 1, "node2") + testRegisterService(t, s, 8, "node1", "service1") + testRegisterService(t, s, 9, "node2", "service1") + testRegisterService(t, s, 10, "node2", "service2") + + idx, usage, err := s.ServiceUsage() + require.NoError(t, err) + require.Equal(t, idx, uint64(10)) + require.Equal(t, 2, usage.Services) + require.Equal(t, 3, usage.ServiceInstances) +} diff --git a/agent/consul/state/usage_test.go b/agent/consul/state/usage_test.go new file mode 100644 index 000000000..a1c07f654 --- /dev/null +++ b/agent/consul/state/usage_test.go @@ -0,0 +1,133 @@ +package state + +import ( + "testing" + + "github.com/hashicorp/consul/agent/structs" + memdb "github.com/hashicorp/go-memdb" + "github.com/stretchr/testify/require" +) + +func TestStateStore_Usage_NodeCount(t *testing.T) { + s := testStateStore(t) + + // No nodes have been registered, and thus no usage entry exists + idx, count, err := s.NodeCount() + require.NoError(t, err) + require.Equal(t, idx, uint64(0)) + require.Equal(t, count, 0) + + testRegisterNode(t, s, 0, "node1") + testRegisterNode(t, s, 1, "node2") + + idx, count, err = s.NodeCount() + require.NoError(t, err) + require.Equal(t, idx, uint64(1)) + require.Equal(t, count, 2) +} + +func TestStateStore_Usage_NodeCount_Delete(t *testing.T) { + s := testStateStore(t) + + testRegisterNode(t, s, 0, "node1") + testRegisterNode(t, s, 1, "node2") + + idx, count, err := s.NodeCount() + require.NoError(t, err) + require.Equal(t, idx, uint64(1)) + require.Equal(t, count, 2) + + require.NoError(t, s.DeleteNode(2, "node2")) + idx, count, err = s.NodeCount() + require.NoError(t, err) + require.Equal(t, 
idx, uint64(2)) + require.Equal(t, count, 1) +} + +func TestStateStore_Usage_ServiceUsageEmpty(t *testing.T) { + s := testStateStore(t) + + // No services have been registered, and thus no usage entry exists + idx, usage, err := s.ServiceUsage() + require.NoError(t, err) + require.Equal(t, idx, uint64(0)) + require.Equal(t, usage.Services, 0) + require.Equal(t, usage.ServiceInstances, 0) +} + +func TestStateStore_Usage_Restore(t *testing.T) { + s := testStateStore(t) + restore := s.Restore() + restore.Registration(9, &structs.RegisterRequest{ + Node: "test-node", + Service: &structs.NodeService{ + ID: "mysql", + Service: "mysql", + Port: 8080, + Address: "198.18.0.2", + }, + }) + require.NoError(t, restore.Commit()) + + idx, count, err := s.NodeCount() + require.NoError(t, err) + require.Equal(t, idx, uint64(9)) + require.Equal(t, count, 1) +} + +func TestStateStore_Usage_updateUsage_Underflow(t *testing.T) { + s := testStateStore(t) + txn := s.db.WriteTxn(1) + + // A single delete change will cause a negative count + changes := Changes{ + Index: 1, + Changes: memdb.Changes{ + { + Table: "nodes", + Before: &structs.Node{}, + After: nil, + }, + }, + } + + err := updateUsage(txn, changes) + require.Error(t, err) + require.Contains(t, err.Error(), "negative count") + + // A insert a change to create a usage entry + changes = Changes{ + Index: 1, + Changes: memdb.Changes{ + { + Table: "nodes", + Before: nil, + After: &structs.Node{}, + }, + }, + } + + err = updateUsage(txn, changes) + require.NoError(t, err) + + // Two deletes will cause a negative count now + changes = Changes{ + Index: 1, + Changes: memdb.Changes{ + { + Table: "nodes", + Before: &structs.Node{}, + After: nil, + }, + { + Table: "nodes", + Before: &structs.Node{}, + After: nil, + }, + }, + } + + err = updateUsage(txn, changes) + require.Error(t, err) + require.Contains(t, err.Error(), "negative count") +} From 45a4057f603e8a45d97c5aee830d8440bbb44af7 Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: 
Wed, 2 Sep 2020 10:24:17 -0500 Subject: [PATCH 22/73] Report node/service usage metrics from every server Using the newly provided state store methods, we periodically emit usage metrics from the servers. We decided to emit these metrics from all servers, not just the leader, because that means we do not have to care about leader election flapping causing metrics turbulence, and it seems reasonable for each server to emit its own view of the state, even if they should always converge rapidly. --- agent/consul/config.go | 16 ++- agent/consul/server.go | 14 ++ agent/consul/usagemetrics/usagemetrics.go | 135 ++++++++++++++++++ agent/consul/usagemetrics/usagemetrics_oss.go | 7 + .../usagemetrics/usagemetrics_oss_test.go | 9 ++ .../consul/usagemetrics/usagemetrics_test.go | 128 +++++++++++++++++ logging/names.go | 1 + 7 files changed, 305 insertions(+), 5 deletions(-) create mode 100644 agent/consul/usagemetrics/usagemetrics.go create mode 100644 agent/consul/usagemetrics/usagemetrics_oss.go create mode 100644 agent/consul/usagemetrics/usagemetrics_oss_test.go create mode 100644 agent/consul/usagemetrics/usagemetrics_test.go diff --git a/agent/consul/config.go b/agent/consul/config.go index a48effe44..955fb49d6 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -443,6 +443,10 @@ type Config struct { // dead servers. AutopilotInterval time.Duration + // MetricsReportingInterval is the frequency with which the server will + // report usage metrics to the configured go-metrics Sinks. + MetricsReportingInterval time.Duration + // ConnectEnabled is whether to enable Connect features such as the CA. 
ConnectEnabled bool @@ -589,11 +593,13 @@ func DefaultConfig() *Config { }, }, - ServerHealthInterval: 2 * time.Second, - AutopilotInterval: 10 * time.Second, - DefaultQueryTime: 300 * time.Second, - MaxQueryTime: 600 * time.Second, - EnterpriseConfig: DefaultEnterpriseConfig(), + ServerHealthInterval: 2 * time.Second, + AutopilotInterval: 10 * time.Second, + MetricsReportingInterval: 10 * time.Second, + DefaultQueryTime: 300 * time.Second, + MaxQueryTime: 600 * time.Second, + + EnterpriseConfig: DefaultEnterpriseConfig(), } // Increase our reap interval to 3 days instead of 24h. diff --git a/agent/consul/server.go b/agent/consul/server.go index 04d3b61bd..c1c1a6d76 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/fsm" "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/usagemetrics" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" @@ -589,6 +590,19 @@ func NewServer(config *Config, options ...ConsulOption) (*Server, error) { return nil, err } + reporter, err := usagemetrics.NewUsageMetricsReporter( + new(usagemetrics.Config). + WithStateProvider(s.fsm). + WithLogger(s.logger). + WithDatacenter(s.config.Datacenter). + WithReportingInterval(s.config.MetricsReportingInterval), + ) + if err != nil { + s.Shutdown() + return nil, fmt.Errorf("Failed to start usage metrics reporter: %v", err) + } + go reporter.Run(&lib.StopChannelContext{StopCh: s.shutdownCh}) + // Initialize Autopilot. This must happen before starting leadership monitoring // as establishing leadership could attempt to use autopilot and cause a panic. 
s.initAutopilot(config) diff --git a/agent/consul/usagemetrics/usagemetrics.go b/agent/consul/usagemetrics/usagemetrics.go new file mode 100644 index 000000000..18b36cfd6 --- /dev/null +++ b/agent/consul/usagemetrics/usagemetrics.go @@ -0,0 +1,135 @@ +package usagemetrics + +import ( + "context" + "errors" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/logging" + "github.com/hashicorp/go-hclog" +) + +// Config holds the settings for various parameters for the +// UsageMetricsReporter +type Config struct { + logger hclog.Logger + metricLabels []metrics.Label + stateProvider StateProvider + tickerInterval time.Duration +} + +// WithDatacenter adds the datacenter as a label to all metrics emitted by the +// UsageMetricsReporter +func (c *Config) WithDatacenter(dc string) *Config { + c.metricLabels = append(c.metricLabels, metrics.Label{Name: "datacenter", Value: dc}) + return c +} + +// WithLogger takes a logger and creates a new, named sub-logger to use when +// running +func (c *Config) WithLogger(logger hclog.Logger) *Config { + c.logger = logger.Named(logging.UsageMetrics) + return c +} + +// WithReportingInterval specifies the interval on which UsageMetricsReporter +// should emit metrics +func (c *Config) WithReportingInterval(dur time.Duration) *Config { + c.tickerInterval = dur + return c +} + +func (c *Config) WithStateProvider(sp StateProvider) *Config { + c.stateProvider = sp + return c +} + +// StateProvider defines an inteface for retrieving a state.Store handle. In +// non-test code, this is satisfied by the fsm.FSM struct. +type StateProvider interface { + State() *state.Store +} + +// UsageMetricsReporter provides functionality for emitting usage metrics into +// the metrics stream. This makes it essentially a translation layer +// between the state store and metrics stream. 
+type UsageMetricsReporter struct { + logger hclog.Logger + metricLabels []metrics.Label + stateProvider StateProvider + tickerInterval time.Duration +} + +func NewUsageMetricsReporter(cfg *Config) (*UsageMetricsReporter, error) { + if cfg.stateProvider == nil { + return nil, errors.New("must provide a StateProvider to usage reporter") + } + + if cfg.logger == nil { + cfg.logger = hclog.NewNullLogger() + } + + if cfg.tickerInterval == 0 { + // Metrics are aggregated every 10 seconds, so we default to that. + cfg.tickerInterval = 10 * time.Second + } + + u := &UsageMetricsReporter{ + logger: cfg.logger, + stateProvider: cfg.stateProvider, + metricLabels: cfg.metricLabels, + tickerInterval: cfg.tickerInterval, + } + + return u, nil +} + +// Run must be run in a goroutine, and can be stopped by closing or sending +// data to the passed in shutdownCh +func (u *UsageMetricsReporter) Run(ctx context.Context) { + ticker := time.NewTicker(u.tickerInterval) + for { + select { + case <-ctx.Done(): + u.logger.Debug("usage metrics reporter shutting down") + ticker.Stop() + return + case <-ticker.C: + u.runOnce() + } + } +} + +func (u *UsageMetricsReporter) runOnce() { + state := u.stateProvider.State() + _, nodes, err := state.NodeCount() + if err != nil { + u.logger.Warn("failed to retrieve nodes from state store", "error", err) + } + metrics.SetGaugeWithLabels( + []string{"consul", "state", "nodes"}, + float32(nodes), + u.metricLabels, + ) + + _, serviceUsage, err := state.ServiceUsage() + if err != nil { + u.logger.Warn("failed to retrieve services from state store", "error", err) + } + + metrics.SetGaugeWithLabels( + []string{"consul", "state", "services"}, + float32(serviceUsage.Services), + u.metricLabels, + ) + + metrics.SetGaugeWithLabels( + []string{"consul", "state", "service_instances"}, + float32(serviceUsage.ServiceInstances), + u.metricLabels, + ) + + u.emitEnterpriseUsage(serviceUsage) +} diff --git a/agent/consul/usagemetrics/usagemetrics_oss.go 
b/agent/consul/usagemetrics/usagemetrics_oss.go new file mode 100644 index 000000000..37d71b83f --- /dev/null +++ b/agent/consul/usagemetrics/usagemetrics_oss.go @@ -0,0 +1,7 @@ +// +build !consulent + +package usagemetrics + +import "github.com/hashicorp/consul/agent/consul/state" + +func (u *UsageMetricsReporter) emitEnterpriseUsage(state.ServiceUsage) {} diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go new file mode 100644 index 000000000..3d5263c0b --- /dev/null +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -0,0 +1,9 @@ +// +build !consulent + +package usagemetrics + +import "github.com/hashicorp/consul/agent/consul/state" + +func newStateStore() (*state.Store, error) { + return state.NewStateStore(nil) +} diff --git a/agent/consul/usagemetrics/usagemetrics_test.go b/agent/consul/usagemetrics/usagemetrics_test.go new file mode 100644 index 000000000..c293cbb1d --- /dev/null +++ b/agent/consul/usagemetrics/usagemetrics_test.go @@ -0,0 +1,128 @@ +package usagemetrics + +import ( + "testing" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type mockStateProvider struct { + mock.Mock +} + +func (m *mockStateProvider) State() *state.Store { + retValues := m.Called() + return retValues.Get(0).(*state.Store) +} + +func TestUsageReporter_Run(t *testing.T) { + type testCase struct { + modfiyStateStore func(t *testing.T, s *state.Store) + expectedGauges map[string]metrics.GaugeValue + } + cases := map[string]testCase{ + "empty-state": { + expectedGauges: map[string]metrics.GaugeValue{ + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + 
"consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + }, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + }, + }, + }, + }, + "nodes-and-services": { + modfiyStateStore: func(t *testing.T, s *state.Store) { + require.Nil(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + require.Nil(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + require.Nil(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) + + // Typical services and some consul services spread across two nodes + require.Nil(t, s.EnsureService(4, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + require.Nil(t, s.EnsureService(5, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + require.Nil(t, s.EnsureService(6, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + require.Nil(t, s.EnsureService(7, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + }, + expectedGauges: map[string]metrics.GaugeValue{ + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 3, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 3, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + }, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 4, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + }, + }, + }, + }, + } + + for name, 
tcase := range cases { + t.Run(name, func(t *testing.T) { + // Only have a single interval for the test + sink := metrics.NewInmemSink(1*time.Minute, 1*time.Minute) + cfg := metrics.DefaultConfig("consul.usage.test") + cfg.EnableHostname = false + metrics.NewGlobal(cfg, sink) + + mockStateProvider := &mockStateProvider{} + s, err := newStateStore() + require.NoError(t, err) + if tcase.modfiyStateStore != nil { + tcase.modfiyStateStore(t, s) + } + mockStateProvider.On("State").Return(s) + + reporter, err := NewUsageMetricsReporter( + new(Config). + WithStateProvider(mockStateProvider). + WithLogger(testutil.Logger(t)). + WithDatacenter("dc1"), + ) + require.NoError(t, err) + + reporter.runOnce() + + intervals := sink.Data() + require.Len(t, intervals, 1) + intv := intervals[0] + + // Range over the expected values instead of just doing an Equal + // comparison on the maps because of different metrics emitted between + // OSS and Ent. The enterprise tests have a full equality comparison on + // the maps. + for key, expected := range tcase.expectedGauges { + require.Equal(t, expected, intv.Gauges[key]) + } + }) + } +} diff --git a/logging/names.go b/logging/names.go index 6ade11bf6..02c0fbf69 100644 --- a/logging/names.go +++ b/logging/names.go @@ -51,6 +51,7 @@ const ( TerminatingGateway string = "terminating_gateway" TLSUtil string = "tlsutil" Transaction string = "txn" + UsageMetrics string = "usage_metrics" WAN string = "wan" Watch string = "watch" ) From d90d95421d5ff371091969b6e055fb0ea08a541e Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:19 -0500 Subject: [PATCH 23/73] Add WriteTxn interface and convert more functions to ReadTxn We add a WriteTxn interface for use in updating the usage memdb table, with the forward-looking prospect of incrementally converting other functions to accept interfaces. As well, we use the ReadTxn in new usage code, and as a side effect convert a couple of existing functions to use that interface as well. 
--- agent/consul/state/memdb.go | 7 +++++++ agent/consul/state/usage.go | 6 +++--- agent/consul/state/usage_oss.go | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 5cdfd19dd..3b5d5e696 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -15,6 +15,13 @@ type ReadTxn interface { Abort() } +// WriteTxn is implemented by memdb.Txn to perform write operations. +type WriteTxn interface { + ReadTxn + Insert(table string, obj interface{}) error + Commit() error +} + // Changes wraps a memdb.Changes to include the index at which these changes // were made. type Changes struct { diff --git a/agent/consul/state/usage.go b/agent/consul/state/usage.go index 397e157f3..8b226b3e1 100644 --- a/agent/consul/state/usage.go +++ b/agent/consul/state/usage.go @@ -37,7 +37,7 @@ type UsageEntry struct { // updateUsage takes a set of memdb changes and computes a delta for specific // usage metrics that we track. 
-func updateUsage(tx *txn, changes Changes) error { +func updateUsage(tx WriteTxn, changes Changes) error { usageDeltas := make(map[string]int) for _, change := range changes.Changes { var delta int @@ -140,7 +140,7 @@ func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) { return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) } - results, err := s.compileServiceUsage(tx, usage.Count) + results, err := compileServiceUsage(tx, usage.Count) if err != nil { return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) } @@ -148,7 +148,7 @@ func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) { return usage.Index, results, nil } -func firstUsageEntry(tx *txn, id string) (*UsageEntry, error) { +func firstUsageEntry(tx ReadTxn, id string) (*UsageEntry, error) { usage, err := tx.First("usage", "id", id) if err != nil { return nil, err diff --git a/agent/consul/state/usage_oss.go b/agent/consul/state/usage_oss.go index ec54313d5..825b80494 100644 --- a/agent/consul/state/usage_oss.go +++ b/agent/consul/state/usage_oss.go @@ -12,7 +12,7 @@ type EnterpriseServiceUsage struct{} func addEnterpriseUsage(map[string]int, memdb.Change) {} -func (s *Store) compileServiceUsage(tx *txn, totalInstances int) (ServiceUsage, error) { +func compileServiceUsage(tx ReadTxn, totalInstances int) (ServiceUsage, error) { var totalServices int results, err := tx.Get( "index", From 79e65343453f7af3fdcf675ff16bfbe30b128ea4 Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:20 -0500 Subject: [PATCH 24/73] Use ReadTxn interface in state store helper functions --- agent/consul/state/operations_oss.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/agent/consul/state/operations_oss.go b/agent/consul/state/operations_oss.go index 4c382694b..30deb7068 100644 --- a/agent/consul/state/operations_oss.go +++ b/agent/consul/state/operations_oss.go @@ -7,30 +7,30 @@ import ( "github.com/hashicorp/go-memdb" ) -func 
firstWithTxn(tx *txn, +func firstWithTxn(tx ReadTxn, table, index, idxVal string, entMeta *structs.EnterpriseMeta) (interface{}, error) { return tx.First(table, index, idxVal) } -func firstWatchWithTxn(tx *txn, +func firstWatchWithTxn(tx ReadTxn, table, index, idxVal string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(table, index, idxVal) } -func firstWatchCompoundWithTxn(tx *txn, +func firstWatchCompoundWithTxn(tx ReadTxn, table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (<-chan struct{}, interface{}, error) { return tx.FirstWatch(table, index, idxVals...) } -func getWithTxn(tx *txn, +func getWithTxn(tx ReadTxn, table, index, idxVal string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) { return tx.Get(table, index, idxVal) } -func getCompoundWithTxn(tx *txn, table, index string, +func getCompoundWithTxn(tx ReadTxn, table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (memdb.ResultIterator, error) { return tx.Get(table, index, idxVals...) From 80f923a47af203259d95491a3dcfbd54931ab851 Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:21 -0500 Subject: [PATCH 25/73] Refactor state store usage to track unique service names This commit refactors the state store usage code to track unique service name changes on transaction commit. This means we only need to lookup usage entries when reading the information, as opposed to iterating over a large number of service indices. - Take into account a service instance's name being changed - Do not iterate through entire list of service instances, we only care about whether there is 0, 1, or more than 1. 
--- agent/consul/state/usage.go | 138 +++++++++++++++++++++++++------ agent/consul/state/usage_oss.go | 24 +----- agent/consul/state/usage_test.go | 61 ++++++++++++++ 3 files changed, 177 insertions(+), 46 deletions(-) diff --git a/agent/consul/state/usage.go b/agent/consul/state/usage.go index 8b226b3e1..6e43f3729 100644 --- a/agent/consul/state/usage.go +++ b/agent/consul/state/usage.go @@ -3,9 +3,14 @@ package state import ( "fmt" + "github.com/hashicorp/consul/agent/structs" memdb "github.com/hashicorp/go-memdb" ) +const ( + serviceNamesUsageTable = "service-names" +) + // usageTableSchema returns a new table schema used for tracking various indexes // for the Raft log. func usageTableSchema() *memdb.TableSchema { @@ -29,12 +34,29 @@ func init() { registerSchema(usageTableSchema) } +// UsageEntry represents a count of some arbitrary identifier within the +// state store, along with the last seen index. type UsageEntry struct { ID string Index uint64 Count int } +// ServiceUsage contains all of the usage data related to services +type ServiceUsage struct { + Services int + ServiceInstances int + EnterpriseServiceUsage +} + +type uniqueServiceState int + +const ( + NoChange uniqueServiceState = 0 + Deleted uniqueServiceState = 1 + Created uniqueServiceState = 2 +) + // updateUsage takes a set of memdb changes and computes a delta for specific // usage metrics that we track. 
func updateUsage(tx WriteTxn, changes Changes) error { @@ -46,23 +68,98 @@ func updateUsage(tx WriteTxn, changes Changes) error { } else if change.Deleted() { delta = -1 } + switch change.Table { case "nodes": usageDeltas[change.Table] += delta case "services": + svc := changeObject(change).(*structs.ServiceNode) usageDeltas[change.Table] += delta - } + serviceIter, err := getWithTxn(tx, servicesTableName, "service", svc.ServiceName, &svc.EnterpriseMeta) + if err != nil { + return err + } - addEnterpriseUsage(usageDeltas, change) + var serviceState uniqueServiceState + if serviceIter.Next() == nil { + // If no services exist, we know we deleted the last service + // instance. + serviceState = Deleted + usageDeltas[serviceNamesUsageTable] -= 1 + } else if serviceIter.Next() == nil { + // If a second call to Next() returns nil, we know only a single + // instance exists. If, in addition, a new service name has been + // registered, either via creating a new service instance or via + // renaming an existing service, then we update our service count. + // + // We only care about two cases here: + // 1. A new service instance has been created with a unique name + // 2. An existing service instance has been updated with a new unique name + // + // These are the only ways a new unique service can be created. The + // other valid cases here: an update that does not change the service + // name, and a deletion, both do not impact the count of unique service + // names in the system. + + if change.Created() { + // Given a single existing service instance of the service: If a + // service has just been created, then we know this is a new unique + // service. + serviceState = Created + usageDeltas[serviceNamesUsageTable] += 1 + } else if serviceNameChanged(change) { + // Given a single existing service instance of the service: If a + // service has been updated with a new service name, then we know + // this is a new unique service. 
+ serviceState = Created + usageDeltas[serviceNamesUsageTable] += 1 + + // Check whether the previous name was deleted in this rename, this + // is a special case of renaming a service which does not result in + // changing the count of unique service names. + before := change.Before.(*structs.ServiceNode) + beforeSvc, err := firstWithTxn(tx, servicesTableName, "service", before.ServiceName, &before.EnterpriseMeta) + if err != nil { + return err + } + if beforeSvc == nil { + usageDeltas[serviceNamesUsageTable] -= 1 + // set serviceState to NoChange since we have both gained and lost a + // service, cancelling each other out + serviceState = NoChange + } + } + } + addEnterpriseServiceUsage(usageDeltas, change, serviceState) + } } idx := changes.Index // This will happen when restoring from a snapshot, just take the max index // of the tables we are tracking. if idx == 0 { - idx = maxIndexTxn(tx, "nodes", "services") + idx = maxIndexTxn(tx, "nodes", servicesTableName) } + return writeUsageDeltas(tx, idx, usageDeltas) +} + +// serviceNameChanged returns a boolean that indicates whether the +// provided change resulted in an update to the service's service name. +func serviceNameChanged(change memdb.Change) bool { + if change.Updated() { + before := change.Before.(*structs.ServiceNode) + after := change.After.(*structs.ServiceNode) + return before.ServiceName != after.ServiceName + } + + return false +} + +// writeUsageDeltas will take in a map of IDs to deltas and update each +// entry accordingly, checking for integer underflow. The index that is +// passed in will be recorded on the entry as well. 
+func writeUsageDeltas(tx WriteTxn, idx uint64, usageDeltas map[string]int) error { for id, delta := range usageDeltas { u, err := tx.First("usage", "id", id) if err != nil { @@ -98,34 +195,16 @@ func updateUsage(tx WriteTxn, changes Changes) error { return nil } -// ServiceUsage contains all of the usage data related to services -type ServiceUsage struct { - Services int - ServiceInstances int - EnterpriseServiceUsage -} - // NodeCount returns the latest seen Raft index, a count of the number of nodes // registered, and any errors. func (s *Store) NodeCount() (uint64, int, error) { tx := s.db.ReadTxn() defer tx.Abort() - usage, err := tx.First("usage", "id", "nodes") + nodeUsage, err := firstUsageEntry(tx, "nodes") if err != nil { return 0, 0, fmt.Errorf("failed nodes lookup: %s", err) } - - // If no nodes have been registered, the usage entry will not exist. - if usage == nil { - return 0, 0, nil - } - - nodeUsage, ok := usage.(*UsageEntry) - if !ok { - return 0, 0, fmt.Errorf("failed nodes lookup: type %T is not *UsageEntry", usage) - } - return nodeUsage.Index, nodeUsage.Count, nil } @@ -135,17 +214,26 @@ func (s *Store) ServiceUsage() (uint64, ServiceUsage, error) { tx := s.db.ReadTxn() defer tx.Abort() - usage, err := firstUsageEntry(tx, "services") + serviceInstances, err := firstUsageEntry(tx, servicesTableName) if err != nil { return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) } - results, err := compileServiceUsage(tx, usage.Count) + services, err := firstUsageEntry(tx, serviceNamesUsageTable) if err != nil { return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) } - return usage.Index, results, nil + usage := ServiceUsage{ + ServiceInstances: serviceInstances.Count, + Services: services.Count, + } + results, err := compileEnterpriseUsage(tx, usage) + if err != nil { + return 0, ServiceUsage{}, fmt.Errorf("failed services lookup: %s", err) + } + + return serviceInstances.Index, results, nil } func firstUsageEntry(tx 
ReadTxn, id string) (*UsageEntry, error) { diff --git a/agent/consul/state/usage_oss.go b/agent/consul/state/usage_oss.go index 825b80494..f35576abf 100644 --- a/agent/consul/state/usage_oss.go +++ b/agent/consul/state/usage_oss.go @@ -3,31 +3,13 @@ package state import ( - "fmt" - memdb "github.com/hashicorp/go-memdb" ) type EnterpriseServiceUsage struct{} -func addEnterpriseUsage(map[string]int, memdb.Change) {} +func addEnterpriseServiceUsage(map[string]int, memdb.Change, uniqueServiceState) {} -func compileServiceUsage(tx ReadTxn, totalInstances int) (ServiceUsage, error) { - var totalServices int - results, err := tx.Get( - "index", - "id_prefix", - serviceIndexName("", nil), - ) - if err != nil { - return ServiceUsage{}, fmt.Errorf("failed services index lookup: %s", err) - } - for i := results.Next(); i != nil; i = results.Next() { - totalServices += 1 - } - - return ServiceUsage{ - Services: totalServices, - ServiceInstances: totalInstances, - }, nil +func compileEnterpriseUsage(tx ReadTxn, usage ServiceUsage) (ServiceUsage, error) { + return usage, nil } diff --git a/agent/consul/state/usage_test.go b/agent/consul/state/usage_test.go index a1c07f654..f608d7d75 100644 --- a/agent/consul/state/usage_test.go +++ b/agent/consul/state/usage_test.go @@ -131,3 +131,64 @@ func TestStateStore_Usage_updateUsage_Underflow(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "negative count") } + +func TestStateStore_Usage_ServiceUsage_updatingServiceName(t *testing.T) { + s := testStateStore(t) + testRegisterNode(t, s, 1, "node1") + testRegisterService(t, s, 1, "node1", "service1") + + t.Run("rename service with a single instance", func(t *testing.T) { + svc := &structs.NodeService{ + ID: "service1", + Service: "after", + Address: "1.1.1.1", + Port: 1111, + } + require.NoError(t, s.EnsureService(2, "node1", svc)) + + // We renamed a service with a single instance, so we maintain 1 service. 
+ idx, usage, err := s.ServiceUsage() + require.NoError(t, err) + require.Equal(t, idx, uint64(2)) + require.Equal(t, usage.Services, 1) + require.Equal(t, usage.ServiceInstances, 1) + }) + + t.Run("rename service with a multiple instances", func(t *testing.T) { + svc2 := &structs.NodeService{ + ID: "service2", + Service: "before", + Address: "1.1.1.2", + Port: 1111, + } + require.NoError(t, s.EnsureService(3, "node1", svc2)) + + svc3 := &structs.NodeService{ + ID: "service3", + Service: "before", + Address: "1.1.1.3", + Port: 1111, + } + require.NoError(t, s.EnsureService(4, "node1", svc3)) + + idx, usage, err := s.ServiceUsage() + require.NoError(t, err) + require.Equal(t, idx, uint64(4)) + require.Equal(t, usage.Services, 2) + require.Equal(t, usage.ServiceInstances, 3) + + update := &structs.NodeService{ + ID: "service2", + Service: "another-name", + Address: "1.1.1.2", + Port: 1111, + } + require.NoError(t, s.EnsureService(5, "node1", update)) + + idx, usage, err = s.ServiceUsage() + require.NoError(t, err) + require.Equal(t, idx, uint64(5)) + require.Equal(t, usage.Services, 3) + require.Equal(t, usage.ServiceInstances, 3) + }) +} From e9b397005ced2c94f6a4fb6056af345bd48c8f1b Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:22 -0500 Subject: [PATCH 26/73] Update godoc string for memdb wrapper functions/structs --- agent/consul/state/memdb.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 3b5d5e696..895da9e06 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -31,8 +31,9 @@ type Changes struct { } // changeTrackerDB is a thin wrapper around memdb.DB which enables TrackChanges on -// all write transactions. When the transaction is committed the changes are -// sent to the eventPublisher which will create and emit change events. +// all write transactions. When the transaction is committed the changes are: +// 1. 
Used to update our internal usage tracking +// 2. Sent to the eventPublisher which will create and emit change events type changeTrackerDB struct { db *memdb.MemDB publisher eventPublisher @@ -100,7 +101,8 @@ func (c *changeTrackerDB) publish(changes Changes) error { // Restore where we need to replace the entire contents of the Store. // WriteTxnRestore uses a zero index since the whole restore doesn't really // occur at one index - the effect is to write many values that were previously -// written across many indexes. +// written across many indexes. WriteTxnRestore also does not publish any +// change events to subscribers. func (c *changeTrackerDB) WriteTxnRestore() *txn { t := &txn{ Txn: c.db.Txn(true), From b245d60200edb960d6956e6158146ca7ba8eb88e Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:24:23 -0500 Subject: [PATCH 27/73] Set metrics reporting interval to 9 seconds This is below the 10 second interval that lib/telemetry.go implements as its aggregation interval, ensuring that we always report these metrics. --- agent/consul/config.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/agent/consul/config.go b/agent/consul/config.go index 955fb49d6..431647565 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -593,9 +593,12 @@ func DefaultConfig() *Config { }, }, + // Stay under the 10 second aggregation interval of + // go-metrics. This ensures we always report the + // usage metrics in each cycle. 
+ MetricsReportingInterval: 9 * time.Second, ServerHealthInterval: 2 * time.Second, AutopilotInterval: 10 * time.Second, - MetricsReportingInterval: 10 * time.Second, DefaultQueryTime: 300 * time.Second, MaxQueryTime: 600 * time.Second, From d661197a3ca5ef10a3fc165f85bf3e1b1fa18e0b Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:38:29 -0500 Subject: [PATCH 28/73] docs: add new usage metrics to telemetry page --- website/pages/docs/agent/telemetry.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/pages/docs/agent/telemetry.mdx b/website/pages/docs/agent/telemetry.mdx index 449997fcd..ea40e2ee6 100644 --- a/website/pages/docs/agent/telemetry.mdx +++ b/website/pages/docs/agent/telemetry.mdx @@ -171,6 +171,9 @@ This is a full list of metrics emitted by Consul. | `consul.runtime.num_goroutines` | This tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. | number of goroutines | gauge | | `consul.runtime.alloc_bytes` | This measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge | | `consul.runtime.heap_objects` | This measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge | +| `consul.state.nodes` | This measures the current number of nodes registered with Consul. It is only emitted by Consul servers. | number of objects | gauge | +| `consul.state.services` | This measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. | number of objects | gauge | +| `consul.state.service_instances` | This measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. 
| number of objects | gauge | | `consul.acl.cache_hit` | The number of ACL cache hits. | hits | counter | | `consul.acl.cache_miss` | The number of ACL cache misses. | misses | counter | | `consul.acl.replication_hit` | The number of ACL replication cache hits (when not running in the ACL datacenter). | hits | counter | From 4197bed23b3601e202ee9db88751ffb37590b95a Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Wed, 2 Sep 2020 10:47:19 -0500 Subject: [PATCH 29/73] connect: fix bug in preventing some namespaced config entry modifications (#8601) Whenever an upsert/deletion of a config entry happens, within the open state store transaction we speculatively test compile all discovery chains that may be affected by the pending modification to verify that the write would not create an erroneous scenario (such as splitting traffic to a subset that did not exist). If a single discovery chain evaluation references two config entries with the same kind and name in different namespaces then sometimes the upsert/deletion would be falsely rejected. It does not appear as though this bug would've let invalid writes through to the state store so the correction does not require a cleanup phase. 
--- .changelog/8601.txt | 3 + agent/consul/state/config_entry.go | 7 +- agent/consul/state/config_entry_test.go | 96 +++++++++++++------------ agent/structs/config_entry.go | 15 ++++ 4 files changed, 71 insertions(+), 50 deletions(-) create mode 100644 .changelog/8601.txt diff --git a/.changelog/8601.txt b/.changelog/8601.txt new file mode 100644 index 000000000..f791fe2ef --- /dev/null +++ b/.changelog/8601.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: fix bug in preventing some namespaced config entry modifications +``` diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index 44113f46a..f19205bbe 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -467,7 +467,7 @@ func validateProposedConfigEntryInServiceGraph( } overrides := map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: kind, Name: name}: next, + structs.NewConfigEntryKindName(kind, name, entMeta): next, } var ( @@ -909,9 +909,8 @@ func configEntryWithOverridesTxn( entMeta *structs.EnterpriseMeta, ) (uint64, structs.ConfigEntry, error) { if len(overrides) > 0 { - entry, ok := overrides[structs.ConfigEntryKindName{ - Kind: kind, Name: name, - }] + kn := structs.NewConfigEntryKindName(kind, name, entMeta) + entry, ok := overrides[kn] if ok { return 0, entry, nil // a nil entry implies it should act like it is erased } diff --git a/agent/consul/state/config_entry_test.go b/agent/consul/state/config_entry_test.go index 4cee1a6c4..fcf7624a5 100644 --- a/agent/consul/state/config_entry_test.go +++ b/agent/consul/state/config_entry_test.go @@ -880,10 +880,10 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceDefaults, Name: "main"}: nil, + 
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil, }, expectAfter: []structs.ConfigEntryKindName{ // nothing @@ -899,17 +899,17 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceDefaults, Name: "main"}: &structs.ServiceConfigEntry{ + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{ Kind: structs.ServiceDefaults, Name: "main", Protocol: "grpc", }, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), }, checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { defaults := entrySet.GetService(structs.NewServiceID("main", nil)) @@ -932,14 +932,14 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: structs.ServiceRouter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceRouter, Name: "main"}: nil, + structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), }, }, { @@ -977,12 +977,12 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: 
structs.ServiceResolver, Name: "main"}, - {Kind: structs.ServiceRouter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceRouter, Name: "main"}: &structs.ServiceRouterConfigEntry{ + structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{ Kind: structs.ServiceRouter, Name: "main", Routes: []structs.ServiceRoute{ @@ -1000,9 +1000,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: structs.ServiceResolver, Name: "main"}, - {Kind: structs.ServiceRouter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil), }, checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { router := entrySet.GetRouter(structs.NewServiceID("main", nil)) @@ -1040,14 +1040,14 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: structs.ServiceSplitter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceSplitter, Name: "main"}: nil, + structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, + 
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), }, }, { @@ -1067,11 +1067,11 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: structs.ServiceSplitter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceSplitter, Name: "main"}: &structs.ServiceSplitterConfigEntry{ + structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{ Kind: structs.ServiceSplitter, Name: "main", Splits: []structs.ServiceSplit{ @@ -1081,8 +1081,8 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceDefaults, Name: "main"}, - {Kind: structs.ServiceSplitter, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil), + structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil), }, checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { splitter := entrySet.GetSplitter(structs.NewServiceID("main", nil)) @@ -1106,10 +1106,10 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceResolver, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceResolver, Name: "main"}: nil, + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil, }, expectAfter: []structs.ConfigEntryKindName{ // nothing @@ -1124,17 +1124,17 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { }, }, expectBefore: 
[]structs.ConfigEntryKindName{ - {Kind: structs.ServiceResolver, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), }, overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{ - {Kind: structs.ServiceResolver, Name: "main"}: &structs.ServiceResolverConfigEntry{ + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{ Kind: structs.ServiceResolver, Name: "main", ConnectTimeout: 33 * time.Second, }, }, expectAfter: []structs.ConfigEntryKindName{ - {Kind: structs.ServiceResolver, Name: "main"}, + structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil), }, checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) { resolver := entrySet.GetResolver(structs.NewServiceID("main", nil)) @@ -1181,28 +1181,32 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) { func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []structs.ConfigEntryKindName { var out []structs.ConfigEntryKindName for _, entry := range entrySet.Routers { - out = append(out, structs.ConfigEntryKindName{ - Kind: entry.Kind, - Name: entry.Name, - }) + out = append(out, structs.NewConfigEntryKindName( + entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) } for _, entry := range entrySet.Splitters { - out = append(out, structs.ConfigEntryKindName{ - Kind: entry.Kind, - Name: entry.Name, - }) + out = append(out, structs.NewConfigEntryKindName( + entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) } for _, entry := range entrySet.Resolvers { - out = append(out, structs.ConfigEntryKindName{ - Kind: entry.Kind, - Name: entry.Name, - }) + out = append(out, structs.NewConfigEntryKindName( + entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) } for _, entry := range entrySet.Services { - out = append(out, structs.ConfigEntryKindName{ - Kind: entry.Kind, - Name: entry.Name, - }) + out = append(out, structs.NewConfigEntryKindName( + 
entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) } return out } diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index d377f83a7..4cafb9b29 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -666,4 +666,19 @@ func (c *ConfigEntryResponse) UnmarshalBinary(data []byte) error { type ConfigEntryKindName struct { Kind string Name string + EnterpriseMeta +} + +func NewConfigEntryKindName(kind, name string, entMeta *EnterpriseMeta) ConfigEntryKindName { + ret := ConfigEntryKindName{ + Kind: kind, + Name: name, + } + if entMeta == nil { + entMeta = DefaultEnterpriseMeta() + } + + ret.EnterpriseMeta = *entMeta + ret.EnterpriseMeta.Normalize() + return ret } From 4bc16c2eb16c80066f6c7aeb43d01e56e9c05f5b Mon Sep 17 00:00:00 2001 From: Chris Piraino Date: Wed, 2 Sep 2020 10:39:35 -0500 Subject: [PATCH 30/73] Changelog entry for usage metrics --- .changelog/8603.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/8603.txt diff --git a/.changelog/8603.txt b/.changelog/8603.txt new file mode 100644 index 000000000..ffe9a9401 --- /dev/null +++ b/.changelog/8603.txt @@ -0,0 +1,3 @@ +```release-note:feature +telemetry: track node and service counts and emit them as metrics +``` From b0bde51e7000063368298fac8d1287e56e3eacbe Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" Date: Wed, 2 Sep 2020 14:10:25 -0500 Subject: [PATCH 31/73] connect: all config entries pick up a meta field (#8596) Fixes #8595 --- .changelog/8596.txt | 3 + agent/config/runtime_test.go | 48 +++++- agent/structs/config_entry.go | 43 ++++- agent/structs/config_entry_discoverychain.go | 36 ++++ agent/structs/config_entry_gateways.go | 25 +++ agent/structs/config_entry_test.go | 92 ++++++++++- api/config_entry.go | 2 + api/config_entry_discoverychain.go | 3 + api/config_entry_gateways.go | 4 + api/config_entry_test.go | 66 +++++++- command/config/write/config_write_test.go | 156 +++++++++++++++++- .../agent/config-entries/ingress-gateway.mdx | 2 + .../agent/config-entries/proxy-defaults.mdx | 2 + .../agent/config-entries/service-defaults.mdx | 2 + .../agent/config-entries/service-resolver.mdx | 4 + .../agent/config-entries/service-router.mdx | 4 + .../agent/config-entries/service-splitter.mdx | 4 + .../config-entries/terminating-gateway.mdx | 2 + 18 files changed, 478 insertions(+), 20 deletions(-) create mode 100644 .changelog/8596.txt diff --git a/.changelog/8596.txt b/.changelog/8596.txt new file mode 100644 index 000000000..0c96f0c7d --- /dev/null +++ b/.changelog/8596.txt @@ -0,0 +1,3 @@ +```release-note:feature +connect: all config entries pick up a meta field +``` diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index adbc269e6..906724ac6 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -3436,6 +3436,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { { "kind": "service-defaults", "name": "web", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "protocol": "http", "external_sni": "abc-123", "mesh_gateway": { @@ -3450,6 +3454,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { bootstrap { kind = "service-defaults" name = "web" + meta { + "foo" = "bar" + "gir" = "zim" + } protocol = "http" external_sni = "abc-123" mesh_gateway { @@ -3461,8 +3469,12 
@@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { rt.DataDir = dataDir rt.ConfigEntryBootstrap = []structs.ConfigEntry{ &structs.ServiceConfigEntry{ - Kind: structs.ServiceDefaults, - Name: "web", + Kind: structs.ServiceDefaults, + Name: "web", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, EnterpriseMeta: *defaultEntMeta, Protocol: "http", ExternalSNI: "abc-123", @@ -3482,6 +3494,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { { "Kind": "service-defaults", "Name": "web", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Protocol": "http", "ExternalSNI": "abc-123", "MeshGateway": { @@ -3496,6 +3512,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { bootstrap { Kind = "service-defaults" Name = "web" + Meta { + "foo" = "bar" + "gir" = "zim" + } Protocol = "http" ExternalSNI = "abc-123" MeshGateway { @@ -3507,8 +3527,12 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { rt.DataDir = dataDir rt.ConfigEntryBootstrap = []structs.ConfigEntry{ &structs.ServiceConfigEntry{ - Kind: structs.ServiceDefaults, - Name: "web", + Kind: structs.ServiceDefaults, + Name: "web", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, EnterpriseMeta: *defaultEntMeta, Protocol: "http", ExternalSNI: "abc-123", @@ -3528,6 +3552,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { { "kind": "service-router", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "routes": [ { "match": { @@ -3612,6 +3640,10 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { bootstrap { kind = "service-router" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } routes = [ { match { @@ -3693,8 +3725,12 @@ func TestBuilder_BuildAndValide_ConfigFlagsAndEdgecases(t *testing.T) { rt.DataDir = dataDir rt.ConfigEntryBootstrap = []structs.ConfigEntry{ &structs.ServiceRouterConfigEntry{ - Kind: structs.ServiceRouter, - Name: 
"main", + Kind: structs.ServiceRouter, + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, EnterpriseMeta: *defaultEntMeta, Routes: []structs.ServiceRoute{ { diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 4cafb9b29..b1ffd3e0d 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/decode" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/go-multierror" "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" ) @@ -43,6 +44,7 @@ type ConfigEntry interface { CanRead(acl.Authorizer) bool CanWrite(acl.Authorizer) bool + GetMeta() map[string]string GetEnterpriseMeta() *EnterpriseMeta GetRaftIndex() *RaftIndex } @@ -64,6 +66,7 @@ type ServiceConfigEntry struct { // // Connect ConnectConfiguration + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -80,6 +83,13 @@ func (e *ServiceConfigEntry) GetName() string { return e.Name } +func (e *ServiceConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *ServiceConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -94,7 +104,7 @@ func (e *ServiceConfigEntry) Normalize() error { } func (e *ServiceConfigEntry) Validate() error { - return nil + return validateConfigEntryMeta(e.Meta) } func (e *ServiceConfigEntry) CanRead(authz acl.Authorizer) bool { @@ -137,6 +147,7 @@ type ProxyConfigEntry struct { MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -153,6 +164,13 @@ func (e *ProxyConfigEntry) GetName() string { return e.Name } +func (e *ProxyConfigEntry) GetMeta() map[string]string { + if e == nil { + return 
nil + } + return e.Meta +} + func (e *ProxyConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -175,6 +193,10 @@ func (e *ProxyConfigEntry) Validate() error { return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal) } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + return e.validateEnterpriseMeta() } @@ -682,3 +704,22 @@ func NewConfigEntryKindName(kind, name string, entMeta *EnterpriseMeta) ConfigEn ret.EnterpriseMeta.Normalize() return ret } + +func validateConfigEntryMeta(meta map[string]string) error { + var err error + if len(meta) > metaMaxKeyPairs { + err = multierror.Append(err, fmt.Errorf( + "Meta exceeds maximum element count %d", metaMaxKeyPairs)) + } + for k, v := range meta { + if len(k) > metaKeyMaxLength { + err = multierror.Append(err, fmt.Errorf( + "Meta key %q exceeds maximum length %d", k, metaKeyMaxLength)) + } + if len(v) > metaValueMaxLength { + err = multierror.Append(err, fmt.Errorf( + "Meta value for key %q exceeds maximum length %d", k, metaValueMaxLength)) + } + } + return err +} diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 04cf32353..57d0a69fe 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -38,6 +38,7 @@ type ServiceRouterConfigEntry struct { // the default service. 
Routes []ServiceRoute + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -54,6 +55,13 @@ func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } +func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *ServiceRouterConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -89,6 +97,10 @@ func (e *ServiceRouterConfigEntry) Validate() error { return fmt.Errorf("Name is required") } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + // Technically you can have no explicit routes at all where just the // catch-all is configured for you, but at that point maybe you should just // delete it so it will default? @@ -407,6 +419,7 @@ type ServiceSplitterConfigEntry struct { // to the FIRST split. Splits []ServiceSplit + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -423,6 +436,13 @@ func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } +func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *ServiceSplitterConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -461,6 +481,10 @@ func (e *ServiceSplitterConfigEntry) Validate() error { return fmt.Errorf("no splits configured") } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + const maxScaledWeight = 100 * 100 copyAsKey := func(s ServiceSplit) ServiceSplit { @@ -639,6 +663,7 @@ type ServiceResolverConfigEntry struct { // to this service. 
ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -710,6 +735,13 @@ func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } +func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *ServiceResolverConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -727,6 +759,10 @@ func (e *ServiceResolverConfigEntry) Validate() error { return fmt.Errorf("Name is required") } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + if len(e.Subsets) > 0 { for name := range e.Subsets { if name == "" { diff --git a/agent/structs/config_entry_gateways.go b/agent/structs/config_entry_gateways.go index a5557dbaf..61b993082 100644 --- a/agent/structs/config_entry_gateways.go +++ b/agent/structs/config_entry_gateways.go @@ -27,6 +27,7 @@ type IngressGatewayConfigEntry struct { // what services to associated to those ports. Listeners []IngressListener + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -73,6 +74,7 @@ type IngressService struct { // using a "tcp" listener. 
Hosts []string + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } @@ -93,6 +95,13 @@ func (e *IngressGatewayConfigEntry) GetName() string { return e.Name } +func (e *IngressGatewayConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *IngressGatewayConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -121,6 +130,10 @@ func (e *IngressGatewayConfigEntry) Normalize() error { } func (e *IngressGatewayConfigEntry) Validate() error { + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + validProtocols := map[string]bool{ "tcp": true, "http": true, @@ -283,6 +296,7 @@ type TerminatingGatewayConfigEntry struct { Name string Services []LinkedService + Meta map[string]string `json:",omitempty"` EnterpriseMeta `hcl:",squash" mapstructure:",squash"` RaftIndex } @@ -322,6 +336,13 @@ func (e *TerminatingGatewayConfigEntry) GetName() string { return e.Name } +func (e *TerminatingGatewayConfigEntry) GetMeta() map[string]string { + if e == nil { + return nil + } + return e.Meta +} + func (e *TerminatingGatewayConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") @@ -339,6 +360,10 @@ func (e *TerminatingGatewayConfigEntry) Normalize() error { } func (e *TerminatingGatewayConfigEntry) Validate() error { + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } + seen := make(map[ServiceID]bool) for _, svc := range e.Services { diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index 34ccfbaf4..d855529f4 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -46,6 +46,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "proxy-defaults" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } config { "foo" = 19 "bar" = "abc" @@ -60,6 +64,10 @@ func TestDecodeConfigEntry(t *testing.T) { 
camel: ` Kind = "proxy-defaults" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Config { "foo" = 19 "bar" = "abc" @@ -74,6 +82,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ProxyConfigEntry{ Kind: "proxy-defaults", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Config: map[string]interface{}{ "foo": 19, "bar": "abc", @@ -91,6 +103,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "service-defaults" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } protocol = "http" external_sni = "abc-123" mesh_gateway { @@ -100,6 +116,10 @@ func TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = "service-defaults" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Protocol = "http" ExternalSNI = "abc-123" MeshGateway { @@ -107,8 +127,12 @@ func TestDecodeConfigEntry(t *testing.T) { } `, expect: &ServiceConfigEntry{ - Kind: "service-defaults", - Name: "main", + Kind: "service-defaults", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Protocol: "http", ExternalSNI: "abc-123", MeshGateway: MeshGatewayConfig{ @@ -121,6 +145,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "service-router" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } routes = [ { match { @@ -200,6 +228,10 @@ func TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = "service-router" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Routes = [ { Match { @@ -279,6 +311,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ServiceRouterConfigEntry{ Kind: "service-router", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Routes: []ServiceRoute{ { Match: &ServiceRouteMatch{ @@ -361,6 +397,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "service-splitter" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } splits = [ { weight = 99.1 @@ -376,6 +416,10 @@ func TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = 
"service-splitter" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Splits = [ { Weight = 99.1 @@ -391,6 +435,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ServiceSplitterConfigEntry{ Kind: ServiceSplitter, Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Splits: []ServiceSplit{ { Weight: 99.1, @@ -409,6 +457,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "service-resolver" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } default_subset = "v1" connect_timeout = "15s" subsets = { @@ -434,6 +486,10 @@ func TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = "service-resolver" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } DefaultSubset = "v1" ConnectTimeout = "15s" Subsets = { @@ -457,8 +513,12 @@ func TestDecodeConfigEntry(t *testing.T) { } }`, expect: &ServiceResolverConfigEntry{ - Kind: "service-resolver", - Name: "main", + Kind: "service-resolver", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, DefaultSubset: "v1", ConnectTimeout: 15 * time.Second, Subsets: map[string]ServiceResolverSubset{ @@ -536,6 +596,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "ingress-gateway" name = "ingress-web" + meta { + "foo" = "bar" + "gir" = "zim" + } tls { enabled = true @@ -578,6 +642,10 @@ func TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = "ingress-gateway" Name = "ingress-web" + Meta { + "foo" = "bar" + "gir" = "zim" + } TLS { Enabled = true } @@ -618,6 +686,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &IngressGatewayConfigEntry{ Kind: "ingress-gateway", Name: "ingress-web", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, TLS: GatewayTLSConfig{ Enabled: true, }, @@ -661,6 +733,10 @@ func TestDecodeConfigEntry(t *testing.T) { snake: ` kind = "terminating-gateway" name = "terminating-gw-west" + meta { + "foo" = "bar" + "gir" = "zim" + } services = [ { name = "payments", @@ -681,6 +757,10 @@ func 
TestDecodeConfigEntry(t *testing.T) { camel: ` Kind = "terminating-gateway" Name = "terminating-gw-west" + Meta { + "foo" = "bar" + "gir" = "zim" + } Services = [ { Name = "payments", @@ -701,6 +781,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &TerminatingGatewayConfigEntry{ Kind: "terminating-gateway", Name: "terminating-gw-west", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Services: []LinkedService{ { Name: "payments", diff --git a/api/config_entry.go b/api/config_entry.go index dc31d6110..a234f6eb2 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -95,6 +95,7 @@ type ServiceConfigEntry struct { MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` ExternalSNI string `json:",omitempty" alias:"external_sni"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 } @@ -122,6 +123,7 @@ type ProxyConfigEntry struct { Config map[string]interface{} `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 } diff --git a/api/config_entry_discoverychain.go b/api/config_entry_discoverychain.go index f3994f0dd..209106339 100644 --- a/api/config_entry_discoverychain.go +++ b/api/config_entry_discoverychain.go @@ -12,6 +12,7 @@ type ServiceRouterConfigEntry struct { Routes []ServiceRoute `json:",omitempty"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 } @@ -111,6 +112,7 @@ type ServiceSplitterConfigEntry struct { Splits []ServiceSplit `json:",omitempty"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 } @@ -138,6 +140,7 @@ type ServiceResolverConfigEntry struct { Failover map[string]ServiceResolverFailover `json:",omitempty"` ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` + Meta map[string]string 
`json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 } diff --git a/api/config_entry_gateways.go b/api/config_entry_gateways.go index 9d3ee0a6a..e259427d8 100644 --- a/api/config_entry_gateways.go +++ b/api/config_entry_gateways.go @@ -21,6 +21,8 @@ type IngressGatewayConfigEntry struct { // what services to associated to those ports. Listeners []IngressListener + Meta map[string]string `json:",omitempty"` + // CreateIndex is the Raft index this entry was created at. This is a // read-only field. CreateIndex uint64 @@ -115,6 +117,8 @@ type TerminatingGatewayConfigEntry struct { // Services is a list of service names represented by the terminating gateway. Services []LinkedService `json:",omitempty"` + Meta map[string]string `json:",omitempty"` + // CreateIndex is the Raft index this entry was created at. This is a // read-only field. CreateIndex uint64 diff --git a/api/config_entry_test.go b/api/config_entry_test.go index aa384bb9f..fac1665d3 100644 --- a/api/config_entry_test.go +++ b/api/config_entry_test.go @@ -271,6 +271,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "proxy-defaults", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Config": { "foo": 19, "bar": "abc", @@ -286,6 +290,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ProxyConfigEntry{ Kind: "proxy-defaults", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Config: map[string]interface{}{ "foo": float64(19), "bar": "abc", @@ -304,6 +312,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "service-defaults", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Protocol": "http", "ExternalSNI": "abc-123", "MeshGateway": { @@ -312,8 +324,12 @@ func TestDecodeConfigEntry(t *testing.T) { } `, expect: &ServiceConfigEntry{ - Kind: "service-defaults", - Name: "main", + Kind: "service-defaults", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Protocol: "http", ExternalSNI: "abc-123", 
MeshGateway: MeshGatewayConfig{ @@ -327,6 +343,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "service-router", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Routes": [ { "Match": { @@ -407,6 +427,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ServiceRouterConfigEntry{ Kind: "service-router", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Routes: []ServiceRoute{ { Match: &ServiceRouteMatch{ @@ -490,6 +514,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "service-splitter", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Splits": [ { "Weight": 99.1, @@ -506,6 +534,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &ServiceSplitterConfigEntry{ Kind: ServiceSplitter, Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Splits: []ServiceSplit{ { Weight: 99.1, @@ -525,6 +557,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "service-resolver", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "DefaultSubset": "v1", "ConnectTimeout": "15s", "Subsets": { @@ -549,8 +585,12 @@ func TestDecodeConfigEntry(t *testing.T) { } }`, expect: &ServiceResolverConfigEntry{ - Kind: "service-resolver", - Name: "main", + Kind: "service-resolver", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, DefaultSubset: "v1", ConnectTimeout: 15 * time.Second, Subsets: map[string]ServiceResolverSubset{ @@ -619,6 +659,10 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": "ingress-gateway", "Name": "ingress-web", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Tls": { "Enabled": true }, @@ -651,6 +695,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &IngressGatewayConfigEntry{ Kind: "ingress-gateway", Name: "ingress-web", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, TLS: GatewayTLSConfig{ Enabled: true, }, @@ -686,9 +734,13 @@ func TestDecodeConfigEntry(t *testing.T) { { "Kind": 
"terminating-gateway", "Name": "terminating-west", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Services": [ { - "Namespace": "foo", + "Namespace": "foo", "Name": "web", "CAFile": "/etc/ca.pem", "CertFile": "/etc/cert.pem", @@ -707,6 +759,10 @@ func TestDecodeConfigEntry(t *testing.T) { expect: &TerminatingGatewayConfigEntry{ Kind: "terminating-gateway", Name: "terminating-west", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Services: []LinkedService{ { Namespace: "foo", diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index a5b9120b2..f11e907fd 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -161,6 +161,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "proxy-defaults" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } config { "foo" = 19 "bar" = "abc" @@ -175,6 +179,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "proxy-defaults" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Config { "foo" = 19 "bar" = "abc" @@ -190,6 +198,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "proxy-defaults", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "config": { "foo": 19, "bar": "abc", @@ -206,6 +218,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "proxy-defaults", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Config": { "foo": 19, "bar": "abc", @@ -221,6 +237,10 @@ func TestParseConfigEntry(t *testing.T) { expect: &api.ProxyConfigEntry{ Kind: "proxy-defaults", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Config: map[string]interface{}{ "foo": 19, "bar": "abc", @@ -235,6 +255,10 @@ func TestParseConfigEntry(t *testing.T) { expectJSON: &api.ProxyConfigEntry{ Kind: "proxy-defaults", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Config: map[string]interface{}{ "foo": float64(19), // 
json decoding gives float64 instead of int here "bar": "abc", @@ -253,6 +277,10 @@ func TestParseConfigEntry(t *testing.T) { kind = "terminating-gateway" name = "terminating-gw-west" namespace = "default" + meta { + "foo" = "bar" + "gir" = "zim" + } services = [ { name = "billing" @@ -272,6 +300,10 @@ func TestParseConfigEntry(t *testing.T) { Kind = "terminating-gateway" Name = "terminating-gw-west" Namespace = "default" + Meta { + "foo" = "bar" + "gir" = "zim" + } Services = [ { Name = "billing" @@ -292,6 +324,10 @@ func TestParseConfigEntry(t *testing.T) { "kind": "terminating-gateway", "name": "terminating-gw-west", "namespace": "default", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "services": [ { "name": "billing", @@ -313,6 +349,10 @@ func TestParseConfigEntry(t *testing.T) { "Kind": "terminating-gateway", "Name": "terminating-gw-west", "Namespace": "default", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Services": [ { "Name": "billing", @@ -333,6 +373,10 @@ func TestParseConfigEntry(t *testing.T) { Kind: "terminating-gateway", Name: "terminating-gw-west", Namespace: "default", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Services: []api.LinkedService{ { Name: "billing", @@ -352,6 +396,10 @@ func TestParseConfigEntry(t *testing.T) { Kind: "terminating-gateway", Name: "terminating-gw-west", Namespace: "default", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Services: []api.LinkedService{ { Name: "billing", @@ -373,6 +421,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-defaults" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } protocol = "http" external_sni = "abc-123" mesh_gateway { @@ -382,6 +434,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-defaults" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Protocol = "http" ExternalSNI = "abc-123" MeshGateway { @@ -392,6 +448,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": 
"service-defaults", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "protocol": "http", "external_sni": "abc-123", "mesh_gateway": { @@ -403,6 +463,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-defaults", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Protocol": "http", "ExternalSNI": "abc-123", "MeshGateway": { @@ -411,8 +475,12 @@ func TestParseConfigEntry(t *testing.T) { } `, expect: &api.ServiceConfigEntry{ - Kind: "service-defaults", - Name: "main", + Kind: "service-defaults", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Protocol: "http", ExternalSNI: "abc-123", MeshGateway: api.MeshGatewayConfig{ @@ -425,6 +493,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-router" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } routes = [ { match { @@ -504,6 +576,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-router" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } Routes = [ { Match { @@ -584,6 +660,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-router", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "routes": [ { "match": { @@ -671,6 +751,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-router", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Routes": [ { "Match": { @@ -757,6 +841,10 @@ func TestParseConfigEntry(t *testing.T) { expect: &api.ServiceRouterConfigEntry{ Kind: "service-router", Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Routes: []api.ServiceRoute{ { Match: &api.ServiceRouteMatch{ @@ -839,6 +927,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-splitter" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } splits = [ { weight = 97.1 @@ -858,6 +950,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-splitter" Name = "main" + Meta { + "foo" = 
"bar" + "gir" = "zim" + } Splits = [ { Weight = 97.1 @@ -878,6 +974,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-splitter", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "splits": [ { "weight": 97.1, @@ -899,6 +999,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-splitter", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Splits": [ { "Weight": 97.1, @@ -919,6 +1023,10 @@ func TestParseConfigEntry(t *testing.T) { expect: &api.ServiceSplitterConfigEntry{ Kind: api.ServiceSplitter, Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, Splits: []api.ServiceSplit{ { Weight: 97.1, @@ -941,6 +1049,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-resolver" name = "main" + meta { + "foo" = "bar" + "gir" = "zim" + } default_subset = "v1" connect_timeout = "15s" subsets = { @@ -966,6 +1078,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-resolver" Name = "main" + Meta { + "foo" = "bar" + "gir" = "zim" + } DefaultSubset = "v1" ConnectTimeout = "15s" Subsets = { @@ -992,6 +1108,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-resolver", "name": "main", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "default_subset": "v1", "connect_timeout": "15s", "subsets": { @@ -1025,6 +1145,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-resolver", "Name": "main", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "DefaultSubset": "v1", "ConnectTimeout": "15s", "Subsets": { @@ -1055,8 +1179,12 @@ func TestParseConfigEntry(t *testing.T) { } `, expect: &api.ServiceResolverConfigEntry{ - Kind: "service-resolver", - Name: "main", + Kind: "service-resolver", + Name: "main", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, DefaultSubset: "v1", ConnectTimeout: 15 * time.Second, Subsets: map[string]api.ServiceResolverSubset{ @@ -1390,6 +1518,10 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind 
= "ingress-gateway" name = "ingress-web" + meta { + "foo" = "bar" + "gir" = "zim" + } tls { enabled = true } @@ -1413,6 +1545,10 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "ingress-gateway" Name = "ingress-web" + Meta { + "foo" = "bar" + "gir" = "zim" + } Tls { Enabled = true } @@ -1437,6 +1573,10 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "ingress-gateway", "name": "ingress-web", + "meta" : { + "foo": "bar", + "gir": "zim" + }, "tls": { "enabled": true }, @@ -1462,6 +1602,10 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "ingress-gateway", "Name": "ingress-web", + "Meta" : { + "foo": "bar", + "gir": "zim" + }, "Tls": { "Enabled": true }, @@ -1486,6 +1630,10 @@ func TestParseConfigEntry(t *testing.T) { expect: &api.IngressGatewayConfigEntry{ Kind: "ingress-gateway", Name: "ingress-web", + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, TLS: api.GatewayTLSConfig{ Enabled: true, }, diff --git a/website/pages/docs/agent/config-entries/ingress-gateway.mdx b/website/pages/docs/agent/config-entries/ingress-gateway.mdx index 01d1fbc39..6db703367 100644 --- a/website/pages/docs/agent/config-entries/ingress-gateway.mdx +++ b/website/pages/docs/agent/config-entries/ingress-gateway.mdx @@ -329,6 +329,8 @@ Also make two services in the frontend namespace available over a custom port wi the gateway is registered in. If omitted, the namespace will be inherited from [the request](/api/config#ns) or will default to the `default` namespace. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `TLS` `(TLSConfig: )` - TLS configuration for this gateway. 
- `Enabled` `(bool: false)` - Set this configuration to enable TLS for diff --git a/website/pages/docs/agent/config-entries/proxy-defaults.mdx b/website/pages/docs/agent/config-entries/proxy-defaults.mdx index 2753ffdb1..e58550c32 100644 --- a/website/pages/docs/agent/config-entries/proxy-defaults.mdx +++ b/website/pages/docs/agent/config-entries/proxy-defaults.mdx @@ -46,6 +46,8 @@ Config { - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `Config` `(map[string]arbitrary)` - An arbitrary map of configuration values used by Connect proxies. The available configurations depend on the Connect proxy you use. Any values that your proxy allows can be configured globally here. To diff --git a/website/pages/docs/agent/config-entries/service-defaults.mdx b/website/pages/docs/agent/config-entries/service-defaults.mdx index feb4e3ca5..af5aa1ec6 100644 --- a/website/pages/docs/agent/config-entries/service-defaults.mdx +++ b/website/pages/docs/agent/config-entries/service-defaults.mdx @@ -31,6 +31,8 @@ Protocol = "http" - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `Protocol` `(string: "tcp")` - Sets the protocol of the service. This is used by Connect proxies for things like observability features and to unlock usage of the [`service-splitter`](/docs/agent/config-entries/service-splitter) and diff --git a/website/pages/docs/agent/config-entries/service-resolver.mdx b/website/pages/docs/agent/config-entries/service-resolver.mdx index 7c418604a..e09ad0f0e 100644 --- a/website/pages/docs/agent/config-entries/service-resolver.mdx +++ b/website/pages/docs/agent/config-entries/service-resolver.mdx @@ -78,6 +78,10 @@ Name = "web" - `Name` `(string: )` - Set to the name of the service being configured. 
+- `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. + +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `ConnectTimeout` `(duration: 0s)` - The timeout for establishing new network connections to this service. diff --git a/website/pages/docs/agent/config-entries/service-router.mdx b/website/pages/docs/agent/config-entries/service-router.mdx index f16968600..55b73a77e 100644 --- a/website/pages/docs/agent/config-entries/service-router.mdx +++ b/website/pages/docs/agent/config-entries/service-router.mdx @@ -129,6 +129,10 @@ Routes = [ - `Name` `(string: )` - Set to the name of the service being configured. +- `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. + +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `Routes` `(array)` - The list of routes to consider when processing L7 requests. The first route to match in the list is terminal and stops further evaluation. Traffic that fails to match any of the provided diff --git a/website/pages/docs/agent/config-entries/service-splitter.mdx b/website/pages/docs/agent/config-entries/service-splitter.mdx index 792a8241b..3ca56ff4e 100644 --- a/website/pages/docs/agent/config-entries/service-splitter.mdx +++ b/website/pages/docs/agent/config-entries/service-splitter.mdx @@ -81,6 +81,10 @@ Splits = [ - `Name` `(string: )` - Set to the name of the service being configured. +- `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. + +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `Splits` `(array)` - Defines how much traffic to send to which set of service instances during a traffic split. The sum of weights across all splits must add up to 100. 
diff --git a/website/pages/docs/agent/config-entries/terminating-gateway.mdx b/website/pages/docs/agent/config-entries/terminating-gateway.mdx index cb8e2c94c..4752fbbf5 100644 --- a/website/pages/docs/agent/config-entries/terminating-gateway.mdx +++ b/website/pages/docs/agent/config-entries/terminating-gateway.mdx @@ -407,6 +407,8 @@ and configure default certificates for mutual TLS. Also override the SNI and CA If omitted, the namespace will be inherited from [the request](/api/config#ns) or will default to the `default` namespace. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. + - `Services` `(array: )` - A list of services to link with the gateway. The gateway will proxy traffic to these services. These linked services must be registered with Consul for the gateway to discover their addresses. They must also From 23bb4bd20c1dde7c3a1cafbeebcfc7d74c82a081 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Thu, 3 Sep 2020 11:08:59 -0500 Subject: [PATCH 32/73] docs: forgot to change this version number when the feature was backported (#8607) --- website/pages/docs/agent/config-entries/ingress-gateway.mdx | 2 +- website/pages/docs/agent/config-entries/proxy-defaults.mdx | 2 +- website/pages/docs/agent/config-entries/service-defaults.mdx | 2 +- website/pages/docs/agent/config-entries/service-resolver.mdx | 2 +- website/pages/docs/agent/config-entries/service-router.mdx | 2 +- website/pages/docs/agent/config-entries/service-splitter.mdx | 2 +- website/pages/docs/agent/config-entries/terminating-gateway.mdx | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/website/pages/docs/agent/config-entries/ingress-gateway.mdx b/website/pages/docs/agent/config-entries/ingress-gateway.mdx index 6db703367..1c5e96b0c 100644 --- a/website/pages/docs/agent/config-entries/ingress-gateway.mdx +++ b/website/pages/docs/agent/config-entries/ingress-gateway.mdx @@ -329,7 +329,7 @@ Also make two services in the frontend namespace 
available over a custom port wi the gateway is registered in. If omitted, the namespace will be inherited from [the request](/api/config#ns) or will default to the `default` namespace. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `TLS` `(TLSConfig: )` - TLS configuration for this gateway. diff --git a/website/pages/docs/agent/config-entries/proxy-defaults.mdx b/website/pages/docs/agent/config-entries/proxy-defaults.mdx index e58550c32..f91ced8fd 100644 --- a/website/pages/docs/agent/config-entries/proxy-defaults.mdx +++ b/website/pages/docs/agent/config-entries/proxy-defaults.mdx @@ -46,7 +46,7 @@ Config { - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `Config` `(map[string]arbitrary)` - An arbitrary map of configuration values used by Connect proxies. The available configurations depend on the Connect proxy you use. Any values diff --git a/website/pages/docs/agent/config-entries/service-defaults.mdx b/website/pages/docs/agent/config-entries/service-defaults.mdx index af5aa1ec6..d8e81cb1a 100644 --- a/website/pages/docs/agent/config-entries/service-defaults.mdx +++ b/website/pages/docs/agent/config-entries/service-defaults.mdx @@ -31,7 +31,7 @@ Protocol = "http" - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `Protocol` `(string: "tcp")` - Sets the protocol of the service. 
This is used by Connect proxies for things like observability features and to unlock usage diff --git a/website/pages/docs/agent/config-entries/service-resolver.mdx b/website/pages/docs/agent/config-entries/service-resolver.mdx index e09ad0f0e..cfdd3f6e4 100644 --- a/website/pages/docs/agent/config-entries/service-resolver.mdx +++ b/website/pages/docs/agent/config-entries/service-resolver.mdx @@ -80,7 +80,7 @@ Name = "web" - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `ConnectTimeout` `(duration: 0s)` - The timeout for establishing new network connections to this service. diff --git a/website/pages/docs/agent/config-entries/service-router.mdx b/website/pages/docs/agent/config-entries/service-router.mdx index 55b73a77e..ff6aba103 100644 --- a/website/pages/docs/agent/config-entries/service-router.mdx +++ b/website/pages/docs/agent/config-entries/service-router.mdx @@ -131,7 +131,7 @@ Routes = [ - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `Routes` `(array)` - The list of routes to consider when processing L7 requests. The first route to match in the list is terminal and diff --git a/website/pages/docs/agent/config-entries/service-splitter.mdx b/website/pages/docs/agent/config-entries/service-splitter.mdx index 3ca56ff4e..b164d34cb 100644 --- a/website/pages/docs/agent/config-entries/service-splitter.mdx +++ b/website/pages/docs/agent/config-entries/service-splitter.mdx @@ -83,7 +83,7 @@ Splits = [ - `Namespace` `(string: "default")` - Specifies the namespace the config entry will apply to. 
-- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `Splits` `(array)` - Defines how much traffic to send to which set of service instances during a traffic split. The sum of weights across diff --git a/website/pages/docs/agent/config-entries/terminating-gateway.mdx b/website/pages/docs/agent/config-entries/terminating-gateway.mdx index 4752fbbf5..c7e2a339a 100644 --- a/website/pages/docs/agent/config-entries/terminating-gateway.mdx +++ b/website/pages/docs/agent/config-entries/terminating-gateway.mdx @@ -407,7 +407,7 @@ and configure default certificates for mutual TLS. Also override the SNI and CA If omitted, the namespace will be inherited from [the request](/api/config#ns) or will default to the `default` namespace. -- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.9.0. +- `Meta` `(map: nil)` - Specifies arbitrary KV metadata pairs. Added in Consul 1.8.4. - `Services` `(array: )` - A list of services to link with the gateway. The gateway will proxy traffic to these services. These linked services From 330b73725fddd5a4fdd31cf79fa5daf567e0687c Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Thu, 2 Jul 2020 13:31:47 -0400 Subject: [PATCH 33/73] agent: add apiServers type for managing HTTP servers Remove Server field from HTTPServer. The field is no longer used. 
--- agent/agent.go | 133 ++++++++++++++------------------------ agent/agent_test.go | 2 +- agent/apiserver.go | 94 +++++++++++++++++++++++++++ agent/apiserver_test.go | 65 +++++++++++++++++++ agent/http.go | 10 ++- agent/http_test.go | 12 ++-- agent/testagent.go | 19 +++--- agent/ui_endpoint_test.go | 2 +- command/agent/agent.go | 3 + 9 files changed, 234 insertions(+), 106 deletions(-) create mode 100644 agent/apiserver.go create mode 100644 agent/apiserver_test.go diff --git a/agent/agent.go b/agent/agent.go index 0c639da9a..20486af12 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -259,10 +259,12 @@ type Agent struct { // dnsServer provides the DNS API dnsServers []*DNSServer - // httpServers provides the HTTP API on various endpoints - httpServers []*HTTPServer + // apiServers listening for connections. If any of these server goroutines + // fail, the agent will be shutdown. + apiServers *apiServers // wgServers is the wait group for all HTTP and DNS servers + // TODO: remove once dnsServers are handled by apiServers wgServers sync.WaitGroup // watchPlans tracks all the currently-running watch plans for the @@ -375,6 +377,9 @@ func New(bd BaseDeps) (*Agent, error) { a.loadTokens(a.config) a.loadEnterpriseTokens(a.config) + // TODO: pass in a fully populated apiServers into Agent.New + a.apiServers = NewAPIServers(a.logger) + return &a, nil } @@ -580,10 +585,7 @@ func (a *Agent) Start(ctx context.Context) error { // Start HTTP and HTTPS servers. for _, srv := range servers { - if err := a.serveHTTP(srv); err != nil { - return err - } - a.httpServers = append(a.httpServers, srv) + a.apiServers.Start(srv) } // Start gRPC server. @@ -605,6 +607,12 @@ func (a *Agent) Start(ctx context.Context) error { return nil } +// Failed returns a channel which is closed when the first server goroutine exits +// with a non-nil error. 
+func (a *Agent) Failed() <-chan struct{} { + return a.apiServers.failed +} + func (a *Agent) listenAndServeGRPC() error { if len(a.config.GRPCAddrs) < 1 { return nil @@ -737,14 +745,16 @@ func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) { // // This approach should ultimately be refactored to the point where we just // start the server and any error should trigger a proper shutdown of the agent. -func (a *Agent) listenHTTP() ([]*HTTPServer, error) { +func (a *Agent) listenHTTP() ([]apiServer, error) { var ln []net.Listener - var servers []*HTTPServer + var servers []apiServer + start := func(proto string, addrs []net.Addr) error { listeners, err := a.startListeners(addrs) if err != nil { return err } + ln = append(ln, listeners...) for _, l := range listeners { var tlscfg *tls.Config @@ -754,18 +764,15 @@ func (a *Agent) listenHTTP() ([]*HTTPServer, error) { l = tls.NewListener(l, tlscfg) } + srv := &HTTPServer{ + agent: a, + denylist: NewDenylist(a.config.HTTPBlockEndpoints), + } httpServer := &http.Server{ Addr: l.Addr().String(), TLSConfig: tlscfg, + Handler: srv.handler(a.config.EnableDebug), } - srv := &HTTPServer{ - Server: httpServer, - ln: l, - agent: a, - denylist: NewDenylist(a.config.HTTPBlockEndpoints), - proto: proto, - } - httpServer.Handler = srv.handler(a.config.EnableDebug) // Load the connlimit helper into the server connLimitFn := a.httpConnLimiter.HTTPConnStateFuncWithDefault429Handler(10 * time.Millisecond) @@ -778,27 +785,39 @@ func (a *Agent) listenHTTP() ([]*HTTPServer, error) { httpServer.ConnState = connLimitFn } - ln = append(ln, l) - servers = append(servers, srv) + servers = append(servers, apiServer{ + Protocol: proto, + Addr: l.Addr(), + Shutdown: httpServer.Shutdown, + Run: func() error { + err := httpServer.Serve(l) + if err == nil || err == http.ErrServerClosed { + return nil + } + return fmt.Errorf("%s server %s failed: %w", proto, l.Addr(), err) + }, + }) } return nil } if err := start("http", 
a.config.HTTPAddrs); err != nil { - for _, l := range ln { - l.Close() - } + closeListeners(ln) return nil, err } if err := start("https", a.config.HTTPSAddrs); err != nil { - for _, l := range ln { - l.Close() - } + closeListeners(ln) return nil, err } return servers, nil } +func closeListeners(lns []net.Listener) { + for _, l := range lns { + l.Close() + } +} + // setupHTTPS adds HTTP/2 support, ConnState, and a connection handshake timeout // to the http.Server. func setupHTTPS(server *http.Server, connState func(net.Conn, http.ConnState), timeout time.Duration) error { @@ -860,43 +879,6 @@ func (a *Agent) listenSocket(path string) (net.Listener, error) { return l, nil } -func (a *Agent) serveHTTP(srv *HTTPServer) error { - // https://github.com/golang/go/issues/20239 - // - // In go.8.1 there is a race between Serve and Shutdown. If - // Shutdown is called before the Serve go routine was scheduled then - // the Serve go routine never returns. This deadlocks the agent - // shutdown for some tests since it will wait forever. - notif := make(chan net.Addr) - a.wgServers.Add(1) - go func() { - defer a.wgServers.Done() - notif <- srv.ln.Addr() - err := srv.Server.Serve(srv.ln) - if err != nil && err != http.ErrServerClosed { - a.logger.Error("error closing server", "error", err) - } - }() - - select { - case addr := <-notif: - if srv.proto == "https" { - a.logger.Info("Started HTTPS server", - "address", addr.String(), - "network", addr.Network(), - ) - } else { - a.logger.Info("Started HTTP server", - "address", addr.String(), - "network", addr.Network(), - ) - } - return nil - case <-time.After(time.Second): - return fmt.Errorf("agent: timeout starting HTTP servers") - } -} - // stopAllWatches stops all the currently running watches func (a *Agent) stopAllWatches() { for _, wp := range a.watchPlans { @@ -1395,13 +1377,12 @@ func (a *Agent) ShutdownAgent() error { // ShutdownEndpoints terminates the HTTP and DNS servers. Should be // preceded by ShutdownAgent. 
+// TODO: remove this method, move to ShutdownAgent func (a *Agent) ShutdownEndpoints() { a.shutdownLock.Lock() defer a.shutdownLock.Unlock() - if len(a.dnsServers) == 0 && len(a.httpServers) == 0 { - return - } + ctx := context.TODO() for _, srv := range a.dnsServers { if srv.Server != nil { @@ -1415,27 +1396,11 @@ func (a *Agent) ShutdownEndpoints() { } a.dnsServers = nil - for _, srv := range a.httpServers { - a.logger.Info("Stopping server", - "protocol", strings.ToUpper(srv.proto), - "address", srv.ln.Addr().String(), - "network", srv.ln.Addr().Network(), - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - srv.Server.Shutdown(ctx) - if ctx.Err() == context.DeadlineExceeded { - a.logger.Warn("Timeout stopping server", - "protocol", strings.ToUpper(srv.proto), - "address", srv.ln.Addr().String(), - "network", srv.ln.Addr().Network(), - ) - } - } - a.httpServers = nil - + a.apiServers.Shutdown(ctx) a.logger.Info("Waiting for endpoints to shut down") - a.wgServers.Wait() + if err := a.apiServers.WaitForShutdown(); err != nil { + a.logger.Error(err.Error()) + } a.logger.Info("Endpoints down") } diff --git a/agent/agent_test.go b/agent/agent_test.go index 479421f59..472f1b652 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -1917,7 +1917,7 @@ func TestAgent_HTTPCheck_EnableAgentTLSForChecks(t *testing.T) { Status: api.HealthCritical, } - url := fmt.Sprintf("https://%s/v1/agent/self", a.srv.ln.Addr().String()) + url := fmt.Sprintf("https://%s/v1/agent/self", a.HTTPAddr()) chk := &structs.CheckType{ HTTP: url, Interval: 20 * time.Millisecond, diff --git a/agent/apiserver.go b/agent/apiserver.go new file mode 100644 index 000000000..27087829a --- /dev/null +++ b/agent/apiserver.go @@ -0,0 +1,94 @@ +package agent + +import ( + "context" + "net" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "golang.org/x/sync/errgroup" +) + +// apiServers is a wrapper around errgroup.Group for managing go routines for +// 
long running agent components (ex: http server, dns server). If any of the +// servers fail, the failed channel will be closed, which will cause the agent +// to be shutdown instead of running in a degraded state. +// +// This struct exists as a shim for using errgroup.Group without making major +// changes to Agent. In the future it may be removed and replaced with more +// direct usage of errgroup.Group. +type apiServers struct { + logger hclog.Logger + group *errgroup.Group + servers []apiServer + // failed channel is closed when the first server goroutines exit with a + // non-nil error. + failed <-chan struct{} +} + +type apiServer struct { + // Protocol supported by this server. One of: dns, http, https + Protocol string + // Addr the server is listening on + Addr net.Addr + // Run will be called in a goroutine to run the server. When any Run exits + // with a non-nil error, the failed channel will be closed. + Run func() error + // Shutdown function used to stop the server + Shutdown func(context.Context) error +} + +// NewAPIServers returns an empty apiServers that is ready to Start servers. +func NewAPIServers(logger hclog.Logger) *apiServers { + group, ctx := errgroup.WithContext(context.TODO()) + return &apiServers{ + logger: logger, + group: group, + failed: ctx.Done(), + } +} + +func (s *apiServers) Start(srv apiServer) { + srv.logger(s.logger).Info("Starting server") + s.servers = append(s.servers, srv) + s.group.Go(srv.Run) +} + +func (s apiServer) logger(base hclog.Logger) hclog.Logger { + return base.With( + "protocol", s.Protocol, + "address", s.Addr.String(), + "network", s.Addr.Network()) +} + +// Shutdown all the servers and log any errors as warning. Each server is given +// 1 second, or until ctx is cancelled, to shutdown gracefully. 
+func (s *apiServers) Shutdown(ctx context.Context) { + shutdownGroup := new(sync.WaitGroup) + + for i := range s.servers { + server := s.servers[i] + shutdownGroup.Add(1) + + go func() { + defer shutdownGroup.Done() + logger := server.logger(s.logger) + logger.Info("Stopping server") + + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + if err := server.Shutdown(ctx); err != nil { + logger.Warn("Failed to stop server") + } + }() + } + s.servers = nil + shutdownGroup.Wait() +} + +// WaitForShutdown waits until all server goroutines have exited. Shutdown +// must be called before WaitForShutdown, otherwise it will block forever. +func (s *apiServers) WaitForShutdown() error { + return s.group.Wait() +} diff --git a/agent/apiserver_test.go b/agent/apiserver_test.go new file mode 100644 index 000000000..72f8c6d65 --- /dev/null +++ b/agent/apiserver_test.go @@ -0,0 +1,65 @@ +package agent + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestAPIServers_WithServiceRunError(t *testing.T) { + servers := NewAPIServers(hclog.New(nil)) + + server1, chErr1 := newAPIServerStub() + server2, _ := newAPIServerStub() + + t.Run("Start", func(t *testing.T) { + servers.Start(server1) + servers.Start(server2) + + select { + case <-servers.failed: + t.Fatalf("expected servers to still be running") + case <-time.After(5 * time.Millisecond): + } + }) + + err := fmt.Errorf("oops, I broke") + + t.Run("server exit non-nil error", func(t *testing.T) { + chErr1 <- err + + select { + case <-servers.failed: + case <-time.After(time.Second): + t.Fatalf("expected failed channel to be closed") + } + }) + + t.Run("shutdown remaining services", func(t *testing.T) { + servers.Shutdown(context.Background()) + require.Equal(t, err, servers.WaitForShutdown()) + }) +} + +func newAPIServerStub() (apiServer, chan error) { + chErr := make(chan error) + return apiServer{ + Protocol: "http", 
+ Addr: &net.TCPAddr{ + IP: net.ParseIP("127.0.0.11"), + Port: 5505, + }, + Run: func() error { + return <-chErr + }, + Shutdown: func(ctx context.Context) error { + close(chErr) + return nil + }, + }, chErr +} diff --git a/agent/http.go b/agent/http.go index bac6c172c..dc9438230 100644 --- a/agent/http.go +++ b/agent/http.go @@ -80,16 +80,14 @@ func (e ForbiddenError) Error() string { } // HTTPServer provides an HTTP api for an agent. +// +// TODO: rename this struct to something more appropriate. It is an http.Handler, +// request router or multiplexer, but it is not a Server. type HTTPServer struct { - // TODO(dnephin): remove Server field, it is not used by any of the HTTPServer methods - Server *http.Server - ln net.Listener agent *Agent denylist *Denylist - - // proto is filled by the agent to "http" or "https". - proto string } + type templatedFile struct { templated *bytes.Reader name string diff --git a/agent/http_test.go b/agent/http_test.go index 6574b8918..36ecf387b 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -1353,7 +1353,7 @@ func TestHTTPServer_HandshakeTimeout(t *testing.T) { // Connect to it with a plain TCP client that doesn't attempt to send HTTP or // complete a TLS handshake. 
- conn, err := net.Dial("tcp", a.srv.ln.Addr().String()) + conn, err := net.Dial("tcp", a.HTTPAddr()) require.NoError(t, err) defer conn.Close() @@ -1413,7 +1413,7 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) { }) defer a.Shutdown() - addr := a.srv.ln.Addr() + addr := a.HTTPAddr() assertConn := func(conn net.Conn, wantOpen bool) { retry.Run(t, func(r *retry.R) { @@ -1433,21 +1433,21 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) { } // Connect to the server with bare TCP - conn1, err := net.DialTimeout("tcp", addr.String(), time.Second) + conn1, err := net.DialTimeout("tcp", addr, time.Second) require.NoError(t, err) defer conn1.Close() assertConn(conn1, true) // Two conns should succeed - conn2, err := net.DialTimeout("tcp", addr.String(), time.Second) + conn2, err := net.DialTimeout("tcp", addr, time.Second) require.NoError(t, err) defer conn2.Close() assertConn(conn2, true) // Third should succeed negotiating TCP handshake... - conn3, err := net.DialTimeout("tcp", addr.String(), time.Second) + conn3, err := net.DialTimeout("tcp", addr, time.Second) require.NoError(t, err) defer conn3.Close() @@ -1460,7 +1460,7 @@ func TestRPC_HTTPSMaxConnsPerClient(t *testing.T) { require.NoError(t, a.reloadConfigInternal(&newCfg)) // Now another conn should be allowed - conn4, err := net.DialTimeout("tcp", addr.String(), time.Second) + conn4, err := net.DialTimeout("tcp", addr, time.Second) require.NoError(t, err) defer conn4.Close() diff --git a/agent/testagent.go b/agent/testagent.go index 8f05b6ed4..fa3508ffa 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -73,8 +73,7 @@ type TestAgent struct { // It is valid after Start(). dns *DNSServer - // srv is a reference to the first started HTTP endpoint. - // It is valid after Start(). + // srv is an HTTPServer that may be used to test http endpoints. 
srv *HTTPServer // overrides is an hcl config source to use to override otherwise @@ -213,6 +212,8 @@ func (a *TestAgent) Start(t *testing.T) (err error) { // Start the anti-entropy syncer a.Agent.StartSync() + a.srv = &HTTPServer{agent: agent, denylist: NewDenylist(a.config.HTTPBlockEndpoints)} + if err := a.waitForUp(); err != nil { a.Shutdown() t.Logf("Error while waiting for test agent to start: %v", err) @@ -220,7 +221,6 @@ func (a *TestAgent) Start(t *testing.T) (err error) { } a.dns = a.dnsServers[0] - a.srv = a.httpServers[0] return nil } @@ -233,7 +233,7 @@ func (a *TestAgent) waitForUp() error { var retErr error var out structs.IndexedNodes for ; !time.Now().After(deadline); time.Sleep(timer.Wait) { - if len(a.httpServers) == 0 { + if len(a.apiServers.servers) == 0 { retErr = fmt.Errorf("waiting for server") continue // fail, try again } @@ -262,7 +262,7 @@ func (a *TestAgent) waitForUp() error { } else { req := httptest.NewRequest("GET", "/v1/agent/self", nil) resp := httptest.NewRecorder() - _, err := a.httpServers[0].AgentSelf(resp, req) + _, err := a.srv.AgentSelf(resp, req) if acl.IsErrPermissionDenied(err) || resp.Code == 403 { // permission denied is enough to show that the client is // connected to the servers as it would get a 503 if @@ -313,10 +313,13 @@ func (a *TestAgent) DNSAddr() string { } func (a *TestAgent) HTTPAddr() string { - if a.srv == nil { - return "" + var srv apiServer + for _, srv = range a.Agent.apiServers.servers { + if srv.Protocol == "http" { + break + } } - return a.srv.Server.Addr + return srv.Addr.String() } func (a *TestAgent) SegmentAddr(name string) string { diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 876d4a97c..4640bcfeb 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -41,7 +41,7 @@ func TestUiIndex(t *testing.T) { // Register node req, _ := http.NewRequest("GET", "/ui/my-file", nil) req.URL.Scheme = "http" - req.URL.Host = a.srv.Server.Addr + req.URL.Host = 
a.HTTPAddr() // Make the request client := cleanhttp.DefaultClient() diff --git a/command/agent/agent.go b/command/agent/agent.go index 7da661306..1e06ef90b 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -288,6 +288,9 @@ func (c *cmd) run(args []string) int { case err := <-agent.RetryJoinCh(): c.logger.Error("Retry join failed", "error", err) return 1 + case <-agent.Failed(): + // The deferred Shutdown method will log the appropriate error + return 1 case <-agent.ShutdownCh(): // agent is already down! return 0 From 9da56d95aa1164ee04feaec136eab5e072ae9b1b Mon Sep 17 00:00:00 2001 From: Tyler Ryan Date: Thu, 3 Sep 2020 14:25:54 -0400 Subject: [PATCH 34/73] =?UTF-8?q?Clarifying=20service=20definition=20requi?= =?UTF-8?q?rements=20for=20registering=20a=20service=20=E2=80=A6=20(#8608)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- website/pages/docs/discovery/services.mdx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/pages/docs/discovery/services.mdx b/website/pages/docs/discovery/services.mdx index 5622012c2..41ec55d2a 100644 --- a/website/pages/docs/discovery/services.mdx +++ b/website/pages/docs/discovery/services.mdx @@ -135,6 +135,14 @@ Services may also contain a `token` field to provide an ACL token. This token is used for any interaction with the catalog for the service, including [anti-entropy syncs](/docs/internals/anti-entropy) and deregistration. +Services registered in Consul clusters where both [Consul Namespaces](/docs/enterprise/namespaces) +and the ACL system are enabled can be registered to specific namespaces that are associated with +ACL tokens scoped to that namespace. Services registered with a service definition +will not inherit the namespace associated with the ACL token specified in the `token` +field. 
The `namespace` field, in addition to the `token` field, must be +included in the service definition for the service to be registered to the +namespace that the ACL token is scoped to. + The `enable_tag_override` can optionally be specified to disable the anti-entropy feature for this service. If `enable_tag_override` is set to `TRUE` then external agents can update this service in the From 81cc3daf695aab840c216a138f2a3017f07b3eb1 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 14 Jul 2020 19:23:44 -0400 Subject: [PATCH 35/73] stream: have SnapshotFunc accept a non-pointer SubscribeRequest The value is not expected to be modified. Passing a value makes that explicit. --- agent/consul/state/store_integration_test.go | 2 +- agent/consul/stream/event_publisher.go | 6 +++++- agent/consul/stream/event_publisher_test.go | 4 ++-- agent/consul/stream/event_snapshot.go | 6 ++---- agent/consul/stream/event_snapshot_test.go | 4 ++-- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index 83a978bb0..6b2e9d1fe 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -376,7 +376,7 @@ var topicService stream.Topic = topic("test-topic-service") func newTestSnapshotHandlers(s *Store) stream.SnapshotHandlers { return stream.SnapshotHandlers{ - topicService: func(req *stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { + topicService: func(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { idx, nodes, err := s.ServiceNodes(nil, req.Key, nil) if err != nil { return idx, err diff --git a/agent/consul/stream/event_publisher.go b/agent/consul/stream/event_publisher.go index 9dfb8bf9e..815a68a26 100644 --- a/agent/consul/stream/event_publisher.go +++ b/agent/consul/stream/event_publisher.go @@ -61,7 +61,11 @@ type changeEvents struct { // SnapshotHandlers is a mapping of Topic 
to a function which produces a snapshot // of events for the SubscribeRequest. Events are appended to the snapshot using SnapshotAppender. // The nil Topic is reserved and should not be used. -type SnapshotHandlers map[Topic]func(*SubscribeRequest, SnapshotAppender) (index uint64, err error) +type SnapshotHandlers map[Topic]SnapshotFunc + +// SnapshotFunc builds a snapshot for the subscription request, and appends the +// events to the Snapshot using SnapshotAppender. +type SnapshotFunc func(SubscribeRequest, SnapshotAppender) (index uint64, err error) // SnapshotAppender appends groups of events to create a Snapshot of state. type SnapshotAppender interface { diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index 4deeb1503..4448e6845 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -58,7 +58,7 @@ func TestEventPublisher_PublishChangesAndSubscribe_WithSnapshot(t *testing.T) { func newTestSnapshotHandlers() SnapshotHandlers { return SnapshotHandlers{ - testTopic: func(req *SubscribeRequest, buf SnapshotAppender) (uint64, error) { + testTopic: func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { if req.Topic != testTopic { return 0, fmt.Errorf("unexpected topic: %v", req.Topic) } @@ -117,7 +117,7 @@ func TestEventPublisher_ShutdownClosesSubscriptions(t *testing.T) { t.Cleanup(cancel) handlers := newTestSnapshotHandlers() - fn := func(req *SubscribeRequest, buf SnapshotAppender) (uint64, error) { + fn := func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { return 0, nil } handlers[intTopic(22)] = fn diff --git a/agent/consul/stream/event_snapshot.go b/agent/consul/stream/event_snapshot.go index 12a52ea37..2f0d276f7 100644 --- a/agent/consul/stream/event_snapshot.go +++ b/agent/consul/stream/event_snapshot.go @@ -18,8 +18,6 @@ type eventSnapshot struct { snapBuffer *eventBuffer } -type snapFunc func(req *SubscribeRequest, buf 
SnapshotAppender) (uint64, error) - // newEventSnapshot creates a snapshot buffer based on the subscription request. // The current buffer head for the topic requested is passed so that once the // snapshot is complete and has been delivered into the buffer, any events @@ -27,7 +25,7 @@ type snapFunc func(req *SubscribeRequest, buf SnapshotAppender) (uint64, error) // missed. Once the snapshot is delivered the topic buffer is spliced onto the // snapshot buffer so that subscribers will naturally follow from the snapshot // to wait for any subsequent updates. -func newEventSnapshot(req *SubscribeRequest, topicBufferHead *bufferItem, fn snapFunc) *eventSnapshot { +func newEventSnapshot(req *SubscribeRequest, topicBufferHead *bufferItem, fn SnapshotFunc) *eventSnapshot { buf := newEventBuffer() s := &eventSnapshot{ Head: buf.Head(), @@ -35,7 +33,7 @@ func newEventSnapshot(req *SubscribeRequest, topicBufferHead *bufferItem, fn sna } go func() { - idx, err := fn(req, s.snapBuffer) + idx, err := fn(*req, s.snapBuffer) if err != nil { s.snapBuffer.AppendItem(&bufferItem{Err: err}) return diff --git a/agent/consul/stream/event_snapshot_test.go b/agent/consul/stream/event_snapshot_test.go index 5e62e7f94..c888e844a 100644 --- a/agent/consul/stream/event_snapshot_test.go +++ b/agent/consul/stream/event_snapshot_test.go @@ -161,8 +161,8 @@ func genSequentialIDs(start, end int) []string { return ids } -func testHealthConsecutiveSnapshotFn(size int, index uint64) snapFunc { - return func(req *SubscribeRequest, buf SnapshotAppender) (uint64, error) { +func testHealthConsecutiveSnapshotFn(size int, index uint64) SnapshotFunc { + return func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) { for i := 0; i < size; i++ { // Event content is arbitrary we are just using Health because it's the // first type defined. 
We just want a set of things with consecutive From 6a1a43721d51cb833e0bb92f85152f3b542630c9 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 21 Jul 2020 18:20:34 -0400 Subject: [PATCH 36/73] state: fix bug in changeTrackerDB.publish Creating a new readTxn does not work because it will not see the newly created objects that are about to be committed. Instead use the active write Txn. --- agent/consul/state/memdb.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 895da9e06..1aaa45dd8 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -85,11 +85,8 @@ func (c *changeTrackerDB) WriteTxn(idx uint64) *txn { return t } -func (c *changeTrackerDB) publish(changes Changes) error { - readOnlyTx := c.db.Txn(false) - defer readOnlyTx.Abort() - - events, err := c.processChanges(readOnlyTx, changes) +func (c *changeTrackerDB) publish(tx ReadTxn, changes Changes) error { + events, err := c.processChanges(tx, changes) if err != nil { return fmt.Errorf("failed generating events from changes: %v", err) } @@ -127,7 +124,7 @@ type txn struct { // Index is stored so that it may be passed along to any subscribers as part // of a change event. Index uint64 - publish func(changes Changes) error + publish func(tx ReadTxn, changes Changes) error } // Commit first pushes changes to EventPublisher, then calls Commit on the @@ -152,7 +149,7 @@ func (tx *txn) Commit() error { // In those cases changes should also be empty, and there will be nothing // to publish. 
if tx.publish != nil { - if err := tx.publish(changes); err != nil { + if err := tx.publish(tx.Txn, changes); err != nil { return err } } From fdfe176deb13fb66127f464c573df79b70984765 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 15 Jul 2020 11:54:50 -0400 Subject: [PATCH 37/73] state: Add Change processor and snapshotter for service health Co-authored-by: Paul Banks --- agent/consul/state/catalog_events.go | 494 +++++++ agent/consul/state/catalog_events_test.go | 1492 +++++++++++++++++++++ agent/consul/state/memdb.go | 31 +- agent/consul/state/state_store.go | 18 +- 4 files changed, 2022 insertions(+), 13 deletions(-) create mode 100644 agent/consul/state/catalog_events.go create mode 100644 agent/consul/state/catalog_events_test.go diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go new file mode 100644 index 000000000..0214b7ef7 --- /dev/null +++ b/agent/consul/state/catalog_events.go @@ -0,0 +1,494 @@ +package state + +import ( + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + memdb "github.com/hashicorp/go-memdb" +) + +// ServiceHealthSnapshot is a stream.SnapFn that provides a streaming snapshot +// of stream.Events that describe the current state of a service health query. +func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + tx := s.db.Txn(false) + defer tx.Abort() + // TODO(namespace-streaming): plumb entMeta through from SubscribeRequest + idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, false, nil) + if err != nil { + return 0, err + } + + _, err = checkServiceNodesToServiceHealth(idx, nodes, buf, TopicServiceHealth) + return idx, err +} + +// ServiceHealthSnapshot is a stream.SnapFn that provides a streaming snapshot +// of stream.Events that describe the current state of a service connect health +// query. 
+func (s *Store) ServiceHealthConnectSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + tx := s.db.Txn(false) + defer tx.Abort() + // TODO(namespace-streaming): plumb entMeta through from SubscribeRequest + idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, true, nil) + if err != nil { + return 0, err + } + + _, err = checkServiceNodesToServiceHealth(idx, nodes, buf, TopicServiceHealthConnect) + return idx, err +} + +type changeOp int + +const ( + OpDelete changeOp = iota + OpCreate + OpUpdate +) + +type eventPayload struct { + Op changeOp + Obj interface{} +} + +// checkServiceNodesToServiceHealth converts a list of structs.CheckServiceNodes +// to stream.ServiceHealth events for streaming. If a non-nil event buffer is +// passed, events are appended to the buffer one at a time and an nil slice is +// returned to avoid keeping a full copy in memory. +func checkServiceNodesToServiceHealth(idx uint64, nodes structs.CheckServiceNodes, + buf stream.SnapshotAppender, topic topic) ([]stream.Event, error) { + var events []stream.Event + for _, n := range nodes { + event := stream.Event{ + Index: idx, + Topic: topic, + Payload: eventPayload{ + Op: OpCreate, + Obj: &n, + }, + } + + if n.Service != nil { + event.Key = n.Service.Service + } + + // TODO: always called with a non-nil buf? + if buf != nil { + buf.Append([]stream.Event{event}) + } else { + events = append(events, event) + } + } + return events, nil +} + +type nodeServiceTuple struct { + Node string + ServiceID string + EntMeta structs.EnterpriseMeta +} + +// ServiceHealthEventsFromChanges returns all the service and Connect health +// events that should be emitted given a set of changes to the state store. 
+func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { + var events []stream.Event + + var nodeChanges map[string]*memdb.Change + var serviceChanges map[nodeServiceTuple]*memdb.Change + + markNode := func(node string, nodeChange *memdb.Change) { + if nodeChanges == nil { + nodeChanges = make(map[string]*memdb.Change) + } + // If the caller has an actual node mutation ensure we store it even if the + // node is already marked. If the caller is just marking the node dirty + // without an node change, don't overwrite any existing node change we know + // about. + ch := nodeChanges[node] + if ch == nil { + nodeChanges[node] = nodeChange + } + } + markService := func(node, service string, entMeta structs.EnterpriseMeta, svcChange *memdb.Change) { + if serviceChanges == nil { + serviceChanges = make(map[nodeServiceTuple]*memdb.Change) + } + k := nodeServiceTuple{ + Node: node, + ServiceID: service, + EntMeta: entMeta, + } + // If the caller has an actual service mutation ensure we store it even if + // the service is already marked. If the caller is just marking the service + // dirty without an node change, don't overwrite any existing node change we + // know about. + ch := serviceChanges[k] + if ch == nil { + serviceChanges[k] = svcChange + } + } + + for _, change := range changes.Changes { + switch change.Table { + case "nodes": + // Node changed in some way, if it's not a delete, we'll need to + // re-deliver CheckServiceNode results for all services on that node but + // we mark it anyway because if it _is_ a delete then we need to know that + // later to avoid trying to deliver events when node level checks mark the + // node as "changed". 
+ nRaw := change.After + if change.After == nil { + nRaw = change.Before + } + n := nRaw.(*structs.Node) + changeCopy := change + markNode(n.Node, &changeCopy) + + case "services": + snRaw := change.After + if change.After == nil { + snRaw = change.Before + } + sn := snRaw.(*structs.ServiceNode) + changeCopy := change + markService(sn.Node, sn.ServiceID, sn.EnterpriseMeta, &changeCopy) + + case "checks": + // For health we only care about the scope for now to know if it's just + // affecting a single service or every service on a node. There is a + // subtle edge case where the check with same ID changes from being node + // scoped to service scoped or vice versa, in either case we need to treat + // it as affecting all services on the node. + switch { + case change.Updated(): + before := change.Before.(*structs.HealthCheck) + after := change.After.(*structs.HealthCheck) + if after.ServiceID == "" || before.ServiceID == "" { + // Either changed from or to being node-scoped + markNode(after.Node, nil) + } else { + // Check changed which means we just need to emit for the linked + // service. + markService(after.Node, after.ServiceID, after.EnterpriseMeta, nil) + + // Edge case - if the check with same ID was updated to link to a + // different service ID but the old service with old ID still exists, + // then the old service instance needs updating too as it has one + // fewer checks now. 
+ if before.ServiceID != after.ServiceID { + markService(before.Node, before.ServiceID, before.EnterpriseMeta, nil) + } + } + + case change.Deleted(): + before := change.Before.(*structs.HealthCheck) + if before.ServiceID == "" { + // Node level check + markNode(before.Node, nil) + } else { + markService(before.Node, before.ServiceID, before.EnterpriseMeta, nil) + } + + case change.Created(): + after := change.After.(*structs.HealthCheck) + if after.ServiceID == "" { + // Node level check + markNode(after.Node, nil) + } else { + markService(after.Node, after.ServiceID, after.EnterpriseMeta, nil) + } + } + } + } + + // Now act on those marked nodes/services + for node, change := range nodeChanges { + // change may be nil if there was a change that _affected_ the node + // like a change to checks but it didn't actually change the node + // record itself. + if change != nil && change.Deleted() { + // Node deletions are a no-op here since the state store transaction will + // have also removed all the service instances which will be handled in + // the loop below. + continue + } + // Rebuild events for all services on this node + es, err := serviceHealthEventsForNode(tx, changes.Index, node) + if err != nil { + return nil, err + } + events = append(events, es...) + } + + for tuple, change := range serviceChanges { + // change may be nil if there was a change that _affected_ the service + // like a change to checks but it didn't actually change the service + // record itself. + if change != nil && change.Deleted() { + // Generate delete event for the service instance and append it + sn := change.Before.(*structs.ServiceNode) + es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, sn, &tuple.EntMeta) + if err != nil { + return nil, err + } + events = append(events, es...) + continue + } + + // Check if this was a service mutation that changed it's name which + // requires special handling even if node changed and new events were + // already published. 
+ if change != nil && change.Updated() { + before := change.Before.(*structs.ServiceNode) + after := change.After.(*structs.ServiceNode) + + if before.ServiceName != after.ServiceName { + // Service was renamed, the code below will ensure the new registrations + // go out to subscribers to the new service name topic key, but we need + // to fix up subscribers that were watching the old name by sending + // deregistrations. + es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, before, &tuple.EntMeta) + if err != nil { + return nil, err + } + events = append(events, es...) + } + + if before.ServiceKind == structs.ServiceKindConnectProxy && + before.ServiceProxy.DestinationServiceName != after.ServiceProxy.DestinationServiceName { + // Connect proxy changed the service it is representing, need to issue a + // dereg for the old service on the Connect topic. We don't actually need + // to deregister this sidecar service though as it still exists and + // didn't change its name (or if it did that was caught just above). But + // our mechanism for connect events is to convert them so we generate + // the regular one, convert it to Connect topic and then discar the + // original. + es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, before, &tuple.EntMeta) + if err != nil { + return nil, err + } + // Don't append es per comment above, but convert it to connect topic + // events. + es = serviceHealthToConnectEvents(es) + events = append(events, es...) + } + } + + if _, ok := nodeChanges[tuple.Node]; ok { + // We already rebuilt events for everything on this node, no need to send + // a duplicate. + continue + } + // Build service event and append it + es, err := serviceHealthEventsForServiceInstance(tx, changes.Index, tuple) + if err != nil { + return nil, err + } + events = append(events, es...) + } + + // Duplicate any events that affected connect-enabled instances (proxies or + // native apps) to the relevant Connect topic. 
+ events = append(events, serviceHealthToConnectEvents(events)...) + + return events, nil +} + +// serviceHealthToConnectEvents converts already formatted service health +// registration events into the ones needed to publish to the Connect topic. +// This essentially means filtering out any instances that are not Connect +// enabled and so of no interest to those subscribers but also involves +// switching connection details to be the proxy instead of the actual instance +// in case of a sidecar. +func serviceHealthToConnectEvents(events []stream.Event) []stream.Event { + serviceHealthConnectEvents := make([]stream.Event, 0, len(events)) + for _, event := range events { + if event.Topic != TopicServiceHealth { + // Skip non-health or any events already emitted to Connect topic + continue + } + node := getPayloadCheckServiceNode(event.Payload) + if node.Service == nil || + (node.Service.Kind != structs.ServiceKindConnectProxy && !node.Service.Connect.Native) { + // Event is not a service instance (i.e. just a node registration) + // or is not a service that is not connect-enabled in some way. + continue + } + + connectEvent := event + connectEvent.Topic = TopicServiceHealthConnect + + // If this is a proxy, set the key to the destination service name. + if node.Service.Kind == structs.ServiceKindConnectProxy { + connectEvent.Key = node.Service.Proxy.DestinationServiceName + } + + serviceHealthConnectEvents = append(serviceHealthConnectEvents, connectEvent) + } + + return serviceHealthConnectEvents +} + +func getPayloadCheckServiceNode(payload interface{}) *structs.CheckServiceNode { + ep, ok := payload.(eventPayload) + if !ok { + return nil + } + csn, ok := ep.Obj.(*structs.CheckServiceNode) + if !ok { + return nil + } + return csn +} + +// serviceHealthEventsForNode returns health events for all services on the +// given node. 
This mirrors some of the the logic in the oddly-named +// parseCheckServiceNodes but is more efficient since we know they are all on +// the same node. +func serviceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) { + // TODO(namespace-streaming): figure out the right EntMeta and mystery arg. + services, err := catalogServiceListByNode(tx, node, nil, false) + if err != nil { + return nil, err + } + + n, nodeChecks, svcChecks, err := getNodeAndChecks(tx, node) + if err != nil { + return nil, err + } + + var events []stream.Event + for service := services.Next(); service != nil; service = services.Next() { + sn := service.(*structs.ServiceNode) + + es, err := serviceHealthEventsForServiceNodeInternal(idx, n, sn, nodeChecks, svcChecks) + if err != nil { + return nil, err + } + + // Append to the results. + events = append(events, es...) + } + + return events, nil +} + +// getNodeAndNodeChecks returns a specific node and ALL checks on that node +// (both node specific and service-specific). node-level Checks are returned as +// a slice, service-specific checks as a map of slices with the service id as +// the map key. +func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, + structs.HealthChecks, map[string]structs.HealthChecks, error) { + // Fetch the node + nodeRaw, err := tx.First("nodes", "id", node) + if err != nil { + return nil, nil, nil, err + } + if nodeRaw == nil { + return nil, nil, nil, ErrMissingNode + } + n := nodeRaw.(*structs.Node) + + // TODO(namespace-streaming): work out what EntMeta is needed here, wildcard? 
+ iter, err := catalogListChecksByNode(tx, node, nil) + if err != nil { + return nil, nil, nil, err + } + + var nodeChecks structs.HealthChecks + var svcChecks map[string]structs.HealthChecks + + for check := iter.Next(); check != nil; check = iter.Next() { + check := check.(*structs.HealthCheck) + if check.ServiceID == "" { + nodeChecks = append(nodeChecks, check) + } else { + if svcChecks == nil { + svcChecks = make(map[string]structs.HealthChecks) + } + svcChecks[check.ServiceID] = append(svcChecks[check.ServiceID], check) + } + } + return n, nodeChecks, svcChecks, nil +} + +func serviceHealthEventsForServiceInstance(tx ReadTxn, idx uint64, tuple nodeServiceTuple) ([]stream.Event, error) { + n, nodeChecks, svcChecks, err := getNodeAndChecks(tx, tuple.Node) + if err != nil { + return nil, err + } + + svc, err := getCompoundWithTxn(tx, "services", "id", &tuple.EntMeta, tuple.Node, tuple.ServiceID) + if err != nil { + return nil, err + } + + sn := svc.Next() + if sn == nil { + return nil, ErrMissingService + } + + return serviceHealthEventsForServiceNodeInternal(idx, n, sn.(*structs.ServiceNode), nodeChecks, svcChecks) +} + +func serviceHealthEventsForServiceNodeInternal(idx uint64, + node *structs.Node, + sn *structs.ServiceNode, + nodeChecks structs.HealthChecks, + svcChecks map[string]structs.HealthChecks) ([]stream.Event, error) { + + // Start with a copy of the node checks. + checks := nodeChecks + for _, check := range svcChecks[sn.ServiceID] { + checks = append(checks, check) + } + + csn := &structs.CheckServiceNode{ + Node: node, + Service: sn.ToNodeService(), + Checks: checks, + } + e := stream.Event{ + Topic: TopicServiceHealth, + Key: sn.ServiceName, + Index: idx, + Payload: eventPayload{ + Op: OpCreate, + Obj: csn, + }, + } + + // See if we also need to emit a connect event (i.e. if this instance is a + // connect proxy or connect native app). 
+ + return []stream.Event{e}, nil +} + +func serviceHealthDeregEventsForServiceInstance(idx uint64, + sn *structs.ServiceNode, entMeta *structs.EnterpriseMeta) ([]stream.Event, error) { + + // We actually only need the node name populated in the node part as it's only + // used as a key to know which service was deregistered so don't bother looking + // up the node in the DB. Note that while the ServiceNode does have NodeID + // etc. fields, they are never populated in memdb per the comment on that + // struct and only filled in when we return copies of the result to users. + // This is also important because if the service was deleted as part of a + // whole node deregistering then the node record won't actually exist now + // anyway and we'd have to plumb it through from the changeset above. + csn := &structs.CheckServiceNode{ + Node: &structs.Node{ + Node: sn.Node, + }, + Service: sn.ToNodeService(), + } + + e := stream.Event{ + Topic: TopicServiceHealth, + Key: sn.ServiceName, + Index: idx, + Payload: eventPayload{ + Op: OpDelete, + Obj: csn, + }, + } + return []stream.Event{e}, nil +} diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go new file mode 100644 index 000000000..5cf610604 --- /dev/null +++ b/agent/consul/state/catalog_events_test.go @@ -0,0 +1,1492 @@ +package state + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/types" + "github.com/stretchr/testify/require" +) + +func TestServiceHealthEventsFromChanges(t *testing.T) { + cases := []struct { + Name string + Setup func(s *Store, tx *txn) error + Mutate func(s *Store, tx *txn) error + WantEvents []stream.Event + WantErr bool + }{ + { + Name: "irrelevant events", + Mutate: func(s *Store, tx *txn) error { + return kvsSetTxn(tx, tx.Index, &structs.DirEntry{ + Key: "foo", + Value: []byte("bar"), + }, false) + 
}, + WantEvents: nil, + WantErr: false, + }, + { + Name: "service reg, new node", + Mutate: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")) + }, + WantEvents: []stream.Event{ + testServiceHealthEvent(t, "web"), + }, + WantErr: false, + }, + { + Name: "service reg, existing node", + Setup: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")) + }, + Mutate: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")) + }, + WantEvents: []stream.Event{ + // Should only publish new service + testServiceHealthEvent(t, "web", evNodeUnchanged), + }, + WantErr: false, + }, + { + Name: "service dereg, existing node", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + }, + WantEvents: []stream.Event{ + // Should only publish deregistration for that service + testServiceHealthDeregistrationEvent(t, "web"), + }, + WantErr: false, + }, + { + Name: "node dereg", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web")); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + return s.deleteNodeTxn(tx, tx.Index, "node1") + }, + WantEvents: []stream.Event{ + // Should publish deregistration events for all services + testServiceHealthDeregistrationEvent(t, "db"), + 
testServiceHealthDeregistrationEvent(t, "web"), + }, + WantErr: false, + }, + { + Name: "connect native reg, new node", + Mutate: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative)) + }, + WantEvents: []stream.Event{ + // We should see both a regular service health event as well as a connect + // one. + testServiceHealthEvent(t, "web", evConnectNative), + testServiceHealthEvent(t, "web", evConnectNative, evConnectTopic), + }, + WantErr: false, + }, + { + Name: "connect native reg, existing node", + Setup: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "db")) + }, + Mutate: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative)) + }, + WantEvents: []stream.Event{ + // We should see both a regular service health event as well as a connect + // one. + testServiceHealthEvent(t, "web", + evNodeUnchanged, + evConnectNative), + testServiceHealthEvent(t, "web", + evNodeUnchanged, + evConnectNative, + evConnectTopic), + }, + WantErr: false, + }, + { + Name: "connect native dereg, existing node", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "db")); err != nil { + return err + } + + return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative)) + }, + Mutate: func(s *Store, tx *txn) error { + return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + }, + WantEvents: []stream.Event{ + // We should see both a regular service dereg event and a connect one + testServiceHealthDeregistrationEvent(t, "web", evConnectNative), + testServiceHealthDeregistrationEvent(t, "web", evConnectNative, evConnectTopic), + }, + WantErr: false, + }, + { + Name: "connect sidecar reg, new node", + Mutate: func(s *Store, tx *txn) error { + if 
err := s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regSidecar)) + }, + WantEvents: []stream.Event{ + // We should see both a regular service health event for the web service + // another for the sidecar service and a connect event for web. + testServiceHealthEvent(t, "web"), + testServiceHealthEvent(t, "web", evSidecar), + testServiceHealthEvent(t, "web", evConnectTopic, evSidecar), + }, + WantErr: false, + }, + { + Name: "connect sidecar reg, existing node", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")) + }, + Mutate: func(s *Store, tx *txn) error { + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + WantEvents: []stream.Event{ + // We should see both a regular service health event for the proxy + // service and a connect one for the target service. 
+ testServiceHealthEvent(t, "web", evSidecar, evNodeUnchanged), + testServiceHealthEvent(t, "web", evConnectTopic, evSidecar, evNodeUnchanged), + }, + WantErr: false, + }, + { + Name: "connect sidecar dereg, existing node", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + Mutate: func(s *Store, tx *txn) error { + // Delete only the sidecar + return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil) + }, + WantEvents: []stream.Event{ + // We should see both a regular service dereg event and a connect one + testServiceHealthDeregistrationEvent(t, "web", evSidecar), + testServiceHealthDeregistrationEvent(t, "web", evConnectTopic, evSidecar), + }, + WantErr: false, + }, + { + Name: "connect sidecar mutate svc", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + Mutate: func(s *Store, tx *txn) error { + // Change port of the target service instance + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regMutatePort)) + }, + WantEvents: []stream.Event{ + // We should see the service topic update but not connect since proxy + // details didn't change. 
+ testServiceHealthEvent(t, "web", + evMutatePort, + evNodeUnchanged, + evServiceMutated, + evChecksUnchanged, + ), + }, + WantErr: false, + }, + { + Name: "connect sidecar mutate sidecar", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + Mutate: func(s *Store, tx *txn) error { + // Change port of the sidecar service instance + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, regMutatePort)) + }, + WantEvents: []stream.Event{ + // We should see the proxy service topic update and a connect update + testServiceHealthEvent(t, "web", + evSidecar, + evMutatePort, + evNodeUnchanged, + evServiceMutated, + evChecksUnchanged), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeUnchanged, + evMutatePort, + evServiceMutated, + evChecksUnchanged), + }, + WantErr: false, + }, + { + Name: "connect sidecar rename service", + Setup: func(s *Store, tx *txn) error { + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + Mutate: func(s *Store, tx *txn) error { + // Change service name but not ID, update proxy too + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regRenameService)); err != nil { + return err + } + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, 
regRenameService)) + }, + WantEvents: []stream.Event{ + // We should see events to deregister the old service instance and the + // old connect instance since we changed topic key for both. Then new + // service and connect registrations. The proxy instance should also + // change since it's not proxying a different service. + testServiceHealthDeregistrationEvent(t, "web"), + testServiceHealthEvent(t, "web", + evRenameService, + evServiceMutated, + evNodeUnchanged, + evChecksMutated, + ), + testServiceHealthDeregistrationEvent(t, "web", + evConnectTopic, + evSidecar, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evRenameService, + evNodeUnchanged, + evServiceMutated, + evChecksUnchanged, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeUnchanged, + evRenameService, + evServiceMutated, + evChecksUnchanged, + ), + }, + WantErr: false, + }, + { + Name: "connect sidecar change destination service", + Setup: func(s *Store, tx *txn) error { + // Register a web_changed service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web_changed")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // And a sidecar initially for web, will be moved to target web_changed + // in Mutate. + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)) + }, + Mutate: func(s *Store, tx *txn) error { + // Change only the destination service of the proxy without a service + // rename or deleting and recreating the proxy. This is far fetched but + // still valid. + return s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, regRenameService)) + }, + WantEvents: []stream.Event{ + // We should only see service health events for the sidecar service + // since the actual target services didn't change. 
But also should see + // Connect topic dereg for the old name to update existing subscribers + // for Connect/web. + testServiceHealthDeregistrationEvent(t, "web", + evConnectTopic, + evSidecar, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evRenameService, + evNodeUnchanged, + evServiceMutated, + evChecksUnchanged, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeUnchanged, + evRenameService, + evServiceMutated, + evChecksUnchanged, + ), + }, + WantErr: false, + }, + { + Name: "multi-service node update", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Change only the node meta. + return s.ensureRegistrationTxn(tx, tx.Index, false, + testNodeRegistration(t, regNodeMeta)) + }, + WantEvents: []stream.Event{ + // We should see updates for all services and a connect update for the + // sidecar's destination. 
+ testServiceHealthEvent(t, "db", + evNodeMeta, + evNodeMutated, + evServiceUnchanged, + evChecksUnchanged, + ), + testServiceHealthEvent(t, "web", + evNodeMeta, + evNodeMutated, + evServiceUnchanged, + evChecksUnchanged, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evNodeMeta, + evNodeMutated, + evServiceUnchanged, + evChecksUnchanged, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeMeta, + evNodeMutated, + evServiceUnchanged, + evChecksUnchanged, + ), + }, + WantErr: false, + }, + { + Name: "multi-service node rename", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Change only the node NAME but not it's ID. We do it for every service + // though since this is effectively what client agent anti-entropy would + // do on a node rename. If we only rename the node it will have no + // services registered afterwards. 
+ // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db", regRenameNode)); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regRenameNode)); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, regRenameNode)); err != nil { + return err + } + return nil + }, + WantEvents: []stream.Event{ + // Node rename is implemented internally as a node delete and new node + // insert after some renaming validation. So we should see full set of + // new events for health, then the deletions of old services, then the + // connect update and delete pair. + testServiceHealthEvent(t, "db", + evRenameNode, + // Although we delete and re-insert, we do maintain the CreatedIndex + // of the node record from the old one. + evNodeMutated, + ), + testServiceHealthEvent(t, "web", + evRenameNode, + evNodeMutated, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evRenameNode, + evNodeMutated, + ), + // dereg events for old node name services + testServiceHealthDeregistrationEvent(t, "db"), + testServiceHealthDeregistrationEvent(t, "web"), + testServiceHealthDeregistrationEvent(t, "web", evSidecar), + // Connect topic updates are last due to the way we add them + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evRenameNode, + evNodeMutated, + ), + testServiceHealthDeregistrationEvent(t, "web", evConnectTopic, evSidecar), + }, + WantErr: false, + }, + { + Name: "multi-service node check failure", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return 
err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Change only the node-level check status + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regNodeCheckFail)); err != nil { + return err + } + return nil + }, + WantEvents: []stream.Event{ + testServiceHealthEvent(t, "db", + evNodeCheckFail, + evNodeUnchanged, + evServiceUnchanged, + // Only the node check changed. This needs to come after evNodeUnchanged + evNodeChecksMutated, + ), + testServiceHealthEvent(t, "web", + evNodeCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evNodeChecksMutated, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evNodeCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evNodeChecksMutated, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evNodeChecksMutated, + ), + }, + WantErr: false, + }, + { + Name: "multi-service node service check failure", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Change the service-level check status + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regServiceCheckFail)); err != nil { + return err + } + // Also change the service-level check status for the proxy. 
This is + // analogous to what would happen with an alias check on the client side + // - the proxies check would get updated at roughly the same time as the + // target service check updates. + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, regServiceCheckFail)); err != nil { + return err + } + return nil + }, + WantEvents: []stream.Event{ + // Should only see the events for that one service change, the sidecar + // service and hence the connect topic for that service. + testServiceHealthEvent(t, "web", + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evChecksMutated, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evChecksMutated, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evChecksMutated, + ), + }, + WantErr: false, + }, + { + Name: "multi-service node node-level check delete", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Delete only the node-level check + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil); err != nil { + return err + } + return nil + }, + WantEvents: []stream.Event{ + testServiceHealthEvent(t, "db", + evNodeCheckDelete, + evNodeUnchanged, + evServiceUnchanged, + ), + testServiceHealthEvent(t, "web", + evNodeCheckDelete, + evNodeUnchanged, + evServiceUnchanged, + ), + 
testServiceHealthEvent(t, "web", + evSidecar, + evNodeCheckDelete, + evNodeUnchanged, + evServiceUnchanged, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evNodeCheckDelete, + evNodeUnchanged, + evServiceUnchanged, + ), + }, + WantErr: false, + }, + { + Name: "multi-service node service check delete", + Setup: func(s *Store, tx *txn) error { + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // Delete the service-level check for the main service + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil); err != nil { + return err + } + // Also delete for a proxy + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil); err != nil { + return err + } + return nil + }, + WantEvents: []stream.Event{ + // Should only see the events for that one service change, the sidecar + // service and hence the connect topic for that service. 
+ testServiceHealthEvent(t, "web", + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evServiceCheckDelete, + ), + testServiceHealthEvent(t, "web", + evSidecar, + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evServiceCheckDelete, + ), + testServiceHealthEvent(t, "web", + evConnectTopic, + evSidecar, + evServiceCheckFail, + evNodeUnchanged, + evServiceUnchanged, + evServiceCheckDelete, + ), + }, + WantErr: false, + }, + { + Name: "many services on many nodes in one TX", + Setup: func(s *Store, tx *txn) error { + // Node1 + + // Register a db service + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "db")); err != nil { + return err + } + + // Node2 + // Also a web + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regNode2)); err != nil { + return err + } + // With a connect sidecar + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar, regNode2)); err != nil { + return err + } + + return nil + }, + Mutate: func(s *Store, tx *txn) error { + // In one transaction the operator moves the web service and it's + // sidecar from node2 back to node1 and deletes them from node2 + + if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil); err != nil { + return err + } + if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil); err != nil { + return err + } + + // Register those on node1 + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web")); err != nil { + return err + } + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "web", regSidecar)); err != nil { + return err + } + + // And for good measure, add a new connect-native service to node2 + if err := s.ensureRegistrationTxn(tx, tx.Index, false, + testServiceRegistration(t, "api", regConnectNative, regNode2)); err != nil { + return err + } + + return nil + }, + 
WantEvents: []stream.Event{ + // We should see: + // - service dereg for web and proxy on node2 + // - connect dereg for web on node2 + // - service reg for web and proxy on node1 + // - connect reg for web on node1 + // - service reg for api on node2 + // - connect reg for api on node2 + testServiceHealthDeregistrationEvent(t, "web", evNode2), + testServiceHealthDeregistrationEvent(t, "web", evNode2, evSidecar), + testServiceHealthDeregistrationEvent(t, "web", + evConnectTopic, + evNode2, + evSidecar, + ), + + testServiceHealthEvent(t, "web", evNodeUnchanged), + testServiceHealthEvent(t, "web", evSidecar, evNodeUnchanged), + testServiceHealthEvent(t, "web", evConnectTopic, evSidecar, evNodeUnchanged), + + testServiceHealthEvent(t, "api", + evNode2, + evConnectNative, + evNodeUnchanged, + ), + testServiceHealthEvent(t, "api", + evNode2, + evConnectTopic, + evConnectNative, + evNodeUnchanged, + ), + }, + WantErr: false, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + s := testStateStore(t) + + if tc.Setup != nil { + // Bypass the publish mechanism for this test or we get into odd + // recursive stuff... + setupTx := s.db.WriteTxn(10) + require.NoError(t, tc.Setup(s, setupTx)) + // Commit the underlying transaction without using wrapped Commit so we + // avoid the whole event publishing system for setup here. It _should_ + // work but it makes debugging test hard as it will call the function + // under test for the setup data... + setupTx.Txn.Commit() + } + + tx := s.db.WriteTxn(100) + require.NoError(t, tc.Mutate(s, tx)) + + // Note we call the func under test directly rather than publishChanges so + // we can test this in isolation. 
+ got, err := ServiceHealthEventsFromChanges(tx, Changes{Changes: tx.Changes(), Index: 100}) + if tc.WantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Make sure we have the right events, only taking ordering into account + // where it matters to account for non-determinism. + requireEventsInCorrectPartialOrder(t, tc.WantEvents, got, func(e stream.Event) string { + // We need events affecting unique registrations to be ordered, within a topic + csn := getPayloadCheckServiceNode(e.Payload) + return fmt.Sprintf("%s/%s/%s", e.Topic, csn.Node.Node, csn.Service.Service) + }) + }) + } +} + +type regOption func(req *structs.RegisterRequest) error + +func testNodeRegistration(t *testing.T, opts ...regOption) *structs.RegisterRequest { + r := &structs.RegisterRequest{ + Datacenter: "dc1", + ID: "11111111-2222-3333-4444-555555555555", + Node: "node1", + Address: "10.10.10.10", + Checks: structs.HealthChecks{ + &structs.HealthCheck{ + CheckID: "serf-health", + Name: "serf-health", + Node: "node1", + Status: api.HealthPassing, + }, + }, + } + for _, opt := range opts { + err := opt(r) + require.NoError(t, err) + } + return r +} + +func testServiceRegistration(t *testing.T, svc string, opts ...regOption) *structs.RegisterRequest { + // note: don't pass opts or they might get applied twice! 
+ r := testNodeRegistration(t) + r.Service = &structs.NodeService{ + ID: svc, + Service: svc, + Port: 8080, + } + r.Checks = append(r.Checks, + &structs.HealthCheck{ + CheckID: types.CheckID("service:" + svc), + Name: "service:" + svc, + Node: "node1", + ServiceID: svc, + ServiceName: svc, + Type: "ttl", + Status: api.HealthPassing, + }) + for _, opt := range opts { + err := opt(r) + require.NoError(t, err) + } + return r +} + +type eventOption func(e *stream.Event) error + +func testServiceHealthEvent(t *testing.T, svc string, opts ...eventOption) stream.Event { + e := newTestEventServiceHealthRegister(100, 1, svc) + + // Normalize a few things that are different in the generic event which was + // based on original code here but made more general. This means we don't have + // to change all the test loads... + csn := getPayloadCheckServiceNode(e.Payload) + csn.Node.ID = "11111111-2222-3333-4444-555555555555" + csn.Node.Address = "10.10.10.10" + + for _, opt := range opts { + err := opt(&e) + require.NoError(t, err) + } + return e +} + +func testServiceHealthDeregistrationEvent(t *testing.T, svc string, opts ...eventOption) stream.Event { + e := newTestEventServiceHealthDeregister(100, 1, svc) + for _, opt := range opts { + err := opt(&e) + require.NoError(t, err) + } + return e +} + +// regConnectNative option converts the base registration into a Connect-native +// one. +func regConnectNative(req *structs.RegisterRequest) error { + if req.Service == nil { + return nil + } + req.Service.Connect.Native = true + return nil +} + +// regSidecar option converts the base registration request +// into the registration for it's sidecar service. 
+func regSidecar(req *structs.RegisterRequest) error { + if req.Service == nil { + return nil + } + svc := req.Service.Service + + req.Service.Kind = structs.ServiceKindConnectProxy + req.Service.ID = svc + "_sidecar_proxy" + req.Service.Service = svc + "_sidecar_proxy" + req.Service.Port = 20000 + req.Service.Port + + req.Service.Proxy.DestinationServiceName = svc + req.Service.Proxy.DestinationServiceID = svc + + // Convert the check to point to the right ID now. This isn't totally + // realistic - sidecars should have alias checks etc but this is good enough + // to test this code path. + if len(req.Checks) >= 2 { + req.Checks[1].CheckID = types.CheckID("service:" + svc + "_sidecar_proxy") + req.Checks[1].ServiceID = svc + "_sidecar_proxy" + } + + return nil +} + +// regNodeCheckFail option converts the base registration request +// into a registration with the node-level health check failing +func regNodeCheckFail(req *structs.RegisterRequest) error { + req.Checks[0].Status = api.HealthCritical + return nil +} + +// regServiceCheckFail option converts the base registration request +// into a registration with the service-level health check failing +func regServiceCheckFail(req *structs.RegisterRequest) error { + req.Checks[1].Status = api.HealthCritical + return nil +} + +// regMutatePort option alters the base registration service port by a relative +// amount to simulate a service change. Can be used with regSidecar since it's a +// relative change (+10). +func regMutatePort(req *structs.RegisterRequest) error { + if req.Service == nil { + return nil + } + req.Service.Port += 10 + return nil +} + +// regRenameService option alters the base registration service name but not +// it's ID simulating a service being renamed while it's ID is maintained +// separately e.g. by a scheduler. This is an edge case but an important one as +// it changes which topic key events propagate. 
+func regRenameService(req *structs.RegisterRequest) error { + if req.Service == nil { + return nil + } + isSidecar := req.Service.Kind == structs.ServiceKindConnectProxy + + if !isSidecar { + req.Service.Service += "_changed" + // Update service checks + if len(req.Checks) >= 2 { + req.Checks[1].ServiceName += "_changed" + } + return nil + } + // This is a sidecar, it's not really realistic but lets only update the + // fields necessary to make it work again with the new service name to be sure + // we get the right result. This is certainly possible if not likely so a + // valid case. + + // We don't need to update out own details, only the name of the destination + req.Service.Proxy.DestinationServiceName += "_changed" + + return nil +} + +// regRenameNode option alters the base registration node name by adding the +// _changed suffix. +func regRenameNode(req *structs.RegisterRequest) error { + req.Node += "_changed" + for i := range req.Checks { + req.Checks[i].Node = req.Node + } + return nil +} + +// regNode2 option alters the base registration to be on a different node. +func regNode2(req *structs.RegisterRequest) error { + req.Node = "node2" + req.ID = "22222222-2222-3333-4444-555555555555" + for i := range req.Checks { + req.Checks[i].Node = req.Node + } + return nil +} + +// regNodeMeta option alters the base registration node to add some meta data. +func regNodeMeta(req *structs.RegisterRequest) error { + req.NodeMeta = map[string]string{"foo": "bar"} + return nil +} + +// evNodeUnchanged option converts the event to reset the node and node check +// raft indexes to the original value where we expect the node not to have been +// changed in the mutation. +func evNodeUnchanged(e *stream.Event) error { + // If the node wasn't touched, its modified index and check's modified + // indexes should be the original ones. 
+ csn := getPayloadCheckServiceNode(e.Payload) + + // Check this isn't a dereg event with made up/placeholder node info + if csn.Node.CreateIndex == 0 { + return nil + } + csn.Node.CreateIndex = 10 + csn.Node.ModifyIndex = 10 + csn.Checks[0].CreateIndex = 10 + csn.Checks[0].ModifyIndex = 10 + return nil +} + +// evServiceUnchanged option converts the event to reset the service and service +// check raft indexes to the original value where we expect the service record +// not to have been changed in the mutation. +func evServiceUnchanged(e *stream.Event) error { + // If the node wasn't touched, its modified index and check's modified + // indexes should be the original ones. + csn := getPayloadCheckServiceNode(e.Payload) + + csn.Service.CreateIndex = 10 + csn.Service.ModifyIndex = 10 + if len(csn.Checks) > 1 { + csn.Checks[1].CreateIndex = 10 + csn.Checks[1].ModifyIndex = 10 + } + return nil +} + +// evConnectNative option converts the base event to represent a connect-native +// service instance. +func evConnectNative(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Service.Connect.Native = true + return nil +} + +// evConnectTopic option converts the base event to the equivalent event that +// should be published to the connect topic. When needed it should be applied +// first as several other options (notable evSidecar) change behavior subtly +// depending on which topic they are published to and they determin this from +// the event. +func evConnectTopic(e *stream.Event) error { + e.Topic = TopicServiceHealthConnect + return nil +} + +// evSidecar option converts the base event to the health (not connect) event +// expected from the sidecar proxy registration for that service instead. When +// needed it should be applied after any option that changes topic (e.g. +// evConnectTopic) but before other options that might change behavior subtly +// depending on whether it's a sidecar or regular service event (e.g. +// evRenameService). 
+func evSidecar(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + + svc := csn.Service.Service + + csn.Service.Kind = structs.ServiceKindConnectProxy + csn.Service.ID = svc + "_sidecar_proxy" + csn.Service.Service = svc + "_sidecar_proxy" + csn.Service.Port = 20000 + csn.Service.Port + + csn.Service.Proxy.DestinationServiceName = svc + csn.Service.Proxy.DestinationServiceID = svc + + // Convert the check to point to the right ID now. This isn't totally + // realistic - sidecars should have alias checks etc but this is good enough + // to test this code path. + if len(csn.Checks) >= 2 { + csn.Checks[1].CheckID = types.CheckID("service:" + svc + "_sidecar_proxy") + csn.Checks[1].ServiceID = svc + "_sidecar_proxy" + csn.Checks[1].ServiceName = svc + "_sidecar_proxy" + } + + // Update event key to be the proxy service name, but only if this is not + // already in the connect topic + if e.Topic != TopicServiceHealthConnect { + e.Key = csn.Service.Service + } + return nil +} + +// evMutatePort option alters the base event service port by a relative +// amount to simulate a service change. Can be used with evSidecar since it's a +// relative change (+10). +func evMutatePort(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Service.Port += 10 + return nil +} + +// evNodeMutated option alters the base event node to set it's CreateIndex +// (but not modify index) to the setup index. This expresses that we expect the +// node record originally created in setup to have been mutated during the +// update. +func evNodeMutated(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Node.CreateIndex = 10 + return nil +} + +// evServiceMutated option alters the base event service to set it's CreateIndex +// (but not modify index) to the setup index. This expresses that we expect the +// service record originally created in setup to have been mutated during the +// update. 
+func evServiceMutated(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Service.CreateIndex = 10 + return nil +} + +// evChecksMutated option alters the base event service check to set it's +// CreateIndex (but not modify index) to the setup index. This expresses that we +// expect the service check records originally created in setup to have been +// mutated during the update. NOTE: this must be sequenced after +// evServiceUnchanged if both are used. +func evChecksMutated(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Checks[1].CreateIndex = 10 + getPayloadCheckServiceNode(e.Payload).Checks[1].ModifyIndex = 100 + return nil +} + +// evNodeChecksMutated option alters the base event node check to set it's +// CreateIndex (but not modify index) to the setup index. This expresses that we +// expect the node check records originally created in setup to have been +// mutated during the update. NOTE: this must be sequenced after evNodeUnchanged +// if both are used. +func evNodeChecksMutated(e *stream.Event) error { + getPayloadCheckServiceNode(e.Payload).Checks[0].CreateIndex = 10 + getPayloadCheckServiceNode(e.Payload).Checks[0].ModifyIndex = 100 + return nil +} + +// evChecksUnchanged option alters the base event service to set all check raft +// indexes to the setup index. This expresses that we expect none of the checks +// to have changed in the update. +func evChecksUnchanged(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + for i := range csn.Checks { + csn.Checks[i].CreateIndex = 10 + csn.Checks[i].ModifyIndex = 10 + } + return nil +} + +// evRenameService option alters the base event service to change the service +// name but not ID simulating an in-place service rename. 
+func evRenameService(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + isSidecar := csn.Service.Kind == structs.ServiceKindConnectProxy + + if !isSidecar { + csn.Service.Service += "_changed" + // Update service checks + if len(csn.Checks) >= 2 { + csn.Checks[1].ServiceName += "_changed" + } + e.Key += "_changed" + return nil + } + // This is a sidecar, it's not really realistic but lets only update the + // fields necessary to make it work again with the new service name to be sure + // we get the right result. This is certainly possible if not likely so a + // valid case. + + // We don't need to update out own details, only the name of the destination + csn.Service.Proxy.DestinationServiceName += "_changed" + + // If this is the connect topic we need to change the key too + if e.Topic == TopicServiceHealthConnect { + e.Key += "_changed" + } + return nil +} + +// evNodeMeta option alters the base event node to add some meta data. +func evNodeMeta(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + csn.Node.Meta = map[string]string{"foo": "bar"} + return nil +} + +// evRenameNode option alters the base event node name. +func evRenameNode(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + csn.Node.Node += "_changed" + for i := range csn.Checks { + csn.Checks[i].Node = csn.Node.Node + } + return nil +} + +// evNode2 option alters the base event to refer to a different node +func evNode2(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + csn.Node.Node = "node2" + // Only change ID if it's set (e.g. 
it's not in a deregistration event) + if csn.Node.ID != "" { + csn.Node.ID = "22222222-2222-3333-4444-555555555555" + } + for i := range csn.Checks { + csn.Checks[i].Node = csn.Node.Node + } + return nil +} + +// evNodeCheckFail option alters the base event to set the node-level health +// check to be failing +func evNodeCheckFail(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + csn.Checks[0].Status = api.HealthCritical + return nil +} + +// evNodeCheckDelete option alters the base event to remove the node-level +// health check +func evNodeCheckDelete(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + // Ensure this is idempotent as we sometimes get called multiple times.. + if len(csn.Checks) > 0 && csn.Checks[0].ServiceID == "" { + csn.Checks = csn.Checks[1:] + } + return nil +} + +// evServiceCheckFail option alters the base event to set the service-level health +// check to be failing +func evServiceCheckFail(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + csn.Checks[1].Status = api.HealthCritical + return nil +} + +// evServiceCheckDelete option alters the base event to remove the service-level +// health check +func evServiceCheckDelete(e *stream.Event) error { + csn := getPayloadCheckServiceNode(e.Payload) + // Ensure this is idempotent as we sometimes get called multiple times.. + if len(csn.Checks) > 1 && csn.Checks[1].ServiceID != "" { + csn.Checks = csn.Checks[0:1] + } + return nil +} + +// requireEventsInCorrectPartialOrder compares that the expected set of events +// was emitted. It allows for _independent_ events to be emitted in any order - +// this can be important because even though the transaction processing is all +// strictly ordered up until the processing func, grouping multiple updates that +// affect the same logical entity may be necessary and may impose random +// ordering changes on the eventual events if a map is used. 
We only care that +// events _affecting the same topic and key_ are ordered correctly with respect +// to the "expected" set of events so this helper asserts that. +// +// The caller provides a func that can return a partition key for the given +// event types and we assert that all events with the same partition key are +// deliveries in the same order. Note that this is not necessarily the same as +// topic/key since for example in Catalog only events about a specific service +// _instance_ need to be ordered while topic and key are more general. +func requireEventsInCorrectPartialOrder(t *testing.T, want, got []stream.Event, + partKey func(stream.Event) string) { + t.Helper() + + // Partion both arrays by topic/key + wantParts := make(map[string][]stream.Event) + gotParts := make(map[string][]stream.Event) + + for _, e := range want { + k := partKey(e) + wantParts[k] = append(wantParts[k], e) + } + for _, e := range got { + k := partKey(e) + gotParts[k] = append(gotParts[k], e) + } + + for k, want := range wantParts { + require.Equal(t, want, gotParts[k], "got incorrect events for partition: %s", k) + } + + for k, got := range gotParts { + if _, ok := wantParts[k]; !ok { + require.Equal(t, nil, got, "got unwanted events for partition: %s", k) + } + } +} + +// newTestEventServiceHealthRegister returns a realistically populated service +// health registration event. The nodeNum is a +// logical node and is used to create the node name ("node%d") but also change +// the node ID and IP address to make it a little more realistic for cases that +// need that. nodeNum should be less than 64k to make the IP address look +// realistic. Any other changes can be made on the returned event to avoid +// adding too many options to callers. 
+func newTestEventServiceHealthRegister(index uint64, nodeNum int, svc string) stream.Event { + node := fmt.Sprintf("node%d", nodeNum) + nodeID := types.NodeID(fmt.Sprintf("11111111-2222-3333-4444-%012d", nodeNum)) + addr := fmt.Sprintf("10.10.%d.%d", nodeNum/256, nodeNum%256) + + return stream.Event{ + Topic: TopicServiceHealth, + Key: svc, + Index: index, + Payload: eventPayload{ + Op: OpCreate, + Obj: &structs.CheckServiceNode{ + Node: &structs.Node{ + ID: nodeID, + Node: node, + Address: addr, + Datacenter: "dc1", + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, + Service: &structs.NodeService{ + ID: svc, + Service: svc, + Port: 8080, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, + Checks: []*structs.HealthCheck{ + { + Node: node, + CheckID: "serf-health", + Name: "serf-health", + Status: "passing", + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, + { + Node: node, + CheckID: types.CheckID("service:" + svc), + Name: "service:" + svc, + ServiceID: svc, + ServiceName: svc, + Type: "ttl", + Status: "passing", + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, + }, + }, + }, + } +} + +// TestEventServiceHealthDeregister returns a realistically populated service +// health deregistration event. The nodeNum is a +// logical node and is used to create the node name ("node%d") but also change +// the node ID and IP address to make it a little more realistic for cases that +// need that. nodeNum should be less than 64k to make the IP address look +// realistic. Any other changes can be made on the returned event to avoid +// adding too many options to callers. 
+func newTestEventServiceHealthDeregister(index uint64, nodeNum int, svc string) stream.Event { + return stream.Event{ + Topic: TopicServiceHealth, + Key: svc, + Index: index, + Payload: eventPayload{ + Op: OpDelete, + Obj: &structs.CheckServiceNode{ + Node: &structs.Node{ + Node: fmt.Sprintf("node%d", nodeNum), + }, + Service: &structs.NodeService{ + ID: svc, + Service: svc, + Port: 8080, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + RaftIndex: structs.RaftIndex{ + // The original insertion index since a delete doesn't update + // this. This magic value came from state store tests where we + // setup at index 10 and then mutate at index 100. It can be + // modified by the caller later and makes it easier than having + // yet another argument in the common case. + CreateIndex: 10, + ModifyIndex: 10, + }, + }, + }, + }, + } +} diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 1aaa45dd8..8ac0cf57a 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -165,11 +165,34 @@ func (t topic) String() string { return string(t) } +var ( + // TopicServiceHealth contains events for all registered service instances. + TopicServiceHealth topic = "topic-service-health" + // TopicServiceHealthConnect contains events for connect-enabled service instances. + TopicServiceHealthConnect topic = "topic-service-health-connect" +) + func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { - // TODO: add other table handlers here. - return aclChangeUnsubscribeEvent(tx, changes) + var events []stream.Event + fns := []func(tx ReadTxn, changes Changes) ([]stream.Event, error){ + aclChangeUnsubscribeEvent, + ServiceHealthEventsFromChanges, + // TODO: add other table handlers here. + } + for _, fn := range fns { + e, err := fn(tx, changes) + if err != nil { + return nil, err + } + events = append(events, e...) 
+ } + return events, nil } -func newSnapshotHandlers() stream.SnapshotHandlers { - return stream.SnapshotHandlers{} +// TODO: could accept a ReadTxner instead of a Store +func newSnapshotHandlers(s *Store) stream.SnapshotHandlers { + return stream.SnapshotHandlers{ + TopicServiceHealth: s.ServiceHealthSnapshot, + TopicServiceHealthConnect: s.ServiceHealthConnectSnapshot, + } } diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index d19922eec..3a7229607 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -162,17 +162,17 @@ func NewStateStore(gc *TombstoneGC) (*Store, error) { ctx, cancel := context.WithCancel(context.TODO()) s := &Store{ - schema: schema, - abandonCh: make(chan struct{}), - kvsGraveyard: NewGraveyard(gc), - lockDelay: NewDelay(), - db: &changeTrackerDB{ - db: db, - publisher: stream.NewEventPublisher(ctx, newSnapshotHandlers(), 10*time.Second), - processChanges: processDBChanges, - }, + schema: schema, + abandonCh: make(chan struct{}), + kvsGraveyard: NewGraveyard(gc), + lockDelay: NewDelay(), stopEventPublisher: cancel, } + s.db = &changeTrackerDB{ + db: db, + publisher: stream.NewEventPublisher(ctx, newSnapshotHandlers(s), 10*time.Second), + processChanges: processDBChanges, + } return s, nil } From 7c3c627028d82c7edbb9e3670fd3f6b50d2119b0 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 21 Jul 2020 18:42:26 -0400 Subject: [PATCH 38/73] state: serviceHealthSnapshot refactored to remove unused return value and remove duplication --- agent/consul/state/catalog_events.go | 84 ++++++++++------------------ agent/consul/state/memdb.go | 4 +- 2 files changed, 32 insertions(+), 56 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 0214b7ef7..f04f9f60f 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -6,37 +6,6 @@ import ( memdb "github.com/hashicorp/go-memdb" ) -// 
ServiceHealthSnapshot is a stream.SnapFn that provides a streaming snapshot -// of stream.Events that describe the current state of a service health query. -func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - tx := s.db.Txn(false) - defer tx.Abort() - // TODO(namespace-streaming): plumb entMeta through from SubscribeRequest - idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, false, nil) - if err != nil { - return 0, err - } - - _, err = checkServiceNodesToServiceHealth(idx, nodes, buf, TopicServiceHealth) - return idx, err -} - -// ServiceHealthSnapshot is a stream.SnapFn that provides a streaming snapshot -// of stream.Events that describe the current state of a service connect health -// query. -func (s *Store) ServiceHealthConnectSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - tx := s.db.Txn(false) - defer tx.Abort() - // TODO(namespace-streaming): plumb entMeta through from SubscribeRequest - idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, true, nil) - if err != nil { - return 0, err - } - - _, err = checkServiceNodesToServiceHealth(idx, nodes, buf, TopicServiceHealthConnect) - return idx, err -} - type changeOp int const ( @@ -50,35 +19,42 @@ type eventPayload struct { Obj interface{} } -// checkServiceNodesToServiceHealth converts a list of structs.CheckServiceNodes -// to stream.ServiceHealth events for streaming. If a non-nil event buffer is -// passed, events are appended to the buffer one at a time and an nil slice is -// returned to avoid keeping a full copy in memory. 
-func checkServiceNodesToServiceHealth(idx uint64, nodes structs.CheckServiceNodes, - buf stream.SnapshotAppender, topic topic) ([]stream.Event, error) { - var events []stream.Event - for _, n := range nodes { - event := stream.Event{ - Index: idx, - Topic: topic, - Payload: eventPayload{ - Op: OpCreate, - Obj: &n, - }, +// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot +// of stream.Events that describe the current state of a service health query. +// +// TODO: no tests for this yet +func serviceHealthSnapshot(s *Store, topic topic) stream.SnapshotFunc { + return func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { + tx := s.db.Txn(false) + defer tx.Abort() + + connect := topic == TopicServiceHealthConnect + // TODO(namespace-streaming): plumb entMeta through from SubscribeRequest + idx, nodes, err := checkServiceNodesTxn(tx, nil, req.Key, connect, nil) + if err != nil { + return 0, err } - if n.Service != nil { - event.Key = n.Service.Service - } + for _, n := range nodes { + event := stream.Event{ + Index: idx, + Topic: topic, + Payload: eventPayload{ + Op: OpCreate, + Obj: &n, + }, + } - // TODO: always called with a non-nil buf? - if buf != nil { + if n.Service != nil { + event.Key = n.Service.Service + } + + // TODO: could all the events be appended as a single item? 
buf.Append([]stream.Event{event}) - } else { - events = append(events, event) } + + return idx, err } - return events, nil } type nodeServiceTuple struct { diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index 8ac0cf57a..d16faa651 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -192,7 +192,7 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { // TODO: could accept a ReadTxner instead of a Store func newSnapshotHandlers(s *Store) stream.SnapshotHandlers { return stream.SnapshotHandlers{ - TopicServiceHealth: s.ServiceHealthSnapshot, - TopicServiceHealthConnect: s.ServiceHealthConnectSnapshot, + TopicServiceHealth: serviceHealthSnapshot(s, TopicServiceHealth), + TopicServiceHealthConnect: serviceHealthSnapshot(s, TopicServiceHealthConnect), } } From 668b98bcce6608c0888b67592a3432511e1d2edc Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 21 Jul 2020 19:39:36 -0400 Subject: [PATCH 39/73] state: use an enum for tracking node changes --- agent/consul/state/catalog_events.go | 61 ++++++++++++++++------------ 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index f04f9f60f..731a95d97 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -68,20 +68,19 @@ type nodeServiceTuple struct { func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { var events []stream.Event - var nodeChanges map[string]*memdb.Change + var nodeChanges map[string]changeType var serviceChanges map[nodeServiceTuple]*memdb.Change - markNode := func(node string, nodeChange *memdb.Change) { + markNode := func(node string, typ changeType) { if nodeChanges == nil { - nodeChanges = make(map[string]*memdb.Change) + nodeChanges = make(map[string]changeType) } // If the caller has an actual node mutation ensure we store it even if the // node is already marked. 
If the caller is just marking the node dirty // without an node change, don't overwrite any existing node change we know // about. - ch := nodeChanges[node] - if ch == nil { - nodeChanges[node] = nodeChange + if nodeChanges[node] == changeIndirect { + nodeChanges[node] = typ } } markService := func(node, service string, entMeta structs.EnterpriseMeta, svcChange *memdb.Change) { @@ -111,20 +110,11 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // we mark it anyway because if it _is_ a delete then we need to know that // later to avoid trying to deliver events when node level checks mark the // node as "changed". - nRaw := change.After - if change.After == nil { - nRaw = change.Before - } - n := nRaw.(*structs.Node) - changeCopy := change - markNode(n.Node, &changeCopy) + n := changeObject(change).(*structs.Node) + markNode(n.Node, changeTypeFromChange(change)) case "services": - snRaw := change.After - if change.After == nil { - snRaw = change.Before - } - sn := snRaw.(*structs.ServiceNode) + sn := changeObject(change).(*structs.ServiceNode) changeCopy := change markService(sn.Node, sn.ServiceID, sn.EnterpriseMeta, &changeCopy) @@ -140,7 +130,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event after := change.After.(*structs.HealthCheck) if after.ServiceID == "" || before.ServiceID == "" { // Either changed from or to being node-scoped - markNode(after.Node, nil) + markNode(after.Node, changeIndirect) } else { // Check changed which means we just need to emit for the linked // service. 
@@ -159,7 +149,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event before := change.Before.(*structs.HealthCheck) if before.ServiceID == "" { // Node level check - markNode(before.Node, nil) + markNode(before.Node, changeIndirect) } else { markService(before.Node, before.ServiceID, before.EnterpriseMeta, nil) } @@ -168,7 +158,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event after := change.After.(*structs.HealthCheck) if after.ServiceID == "" { // Node level check - markNode(after.Node, nil) + markNode(after.Node, changeIndirect) } else { markService(after.Node, after.ServiceID, after.EnterpriseMeta, nil) } @@ -177,11 +167,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } // Now act on those marked nodes/services - for node, change := range nodeChanges { - // change may be nil if there was a change that _affected_ the node - // like a change to checks but it didn't actually change the node - // record itself. - if change != nil && change.Deleted() { + for node, changeType := range nodeChanges { + if changeType == changeDelete { // Node deletions are a no-op here since the state store transaction will // have also removed all the service instances which will be handled in // the loop below. @@ -269,6 +256,28 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event return events, nil } +type changeType uint8 + +const ( + // changeIndirect indicates some other object changed which has implications + // for the target object. 
+ changeIndirect changeType = iota + changeDelete + changeCreate + changeUpdate +) + +func changeTypeFromChange(change memdb.Change) changeType { + switch { + case change.Deleted(): + return changeDelete + case change.Created(): + return changeCreate + default: + return changeUpdate + } +} + // serviceHealthToConnectEvents converts already formatted service health // registration events into the ones needed to publish to the Connect topic. // This essentially means filtering out any instances that are not Connect From c61313b78aba07e3f57373f163f46cc1cb41b14a Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 21 Jul 2020 21:02:22 -0400 Subject: [PATCH 40/73] state: Remove unused args and return values Also rename some functions to identify them as constructors for events --- agent/consul/state/catalog_events.go | 105 +++++++++------------------ 1 file changed, 35 insertions(+), 70 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 731a95d97..a08499b11 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -96,8 +96,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // the service is already marked. If the caller is just marking the service // dirty without an node change, don't overwrite any existing node change we // know about. - ch := serviceChanges[k] - if ch == nil { + if serviceChanges[k] == nil { serviceChanges[k] = svcChange } } @@ -115,7 +114,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event case "services": sn := changeObject(change).(*structs.ServiceNode) - changeCopy := change + changeCopy := change // TODO: why does the change need to be copied? 
markService(sn.Node, sn.ServiceID, sn.EnterpriseMeta, &changeCopy) case "checks": @@ -145,22 +144,13 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } } - case change.Deleted(): - before := change.Before.(*structs.HealthCheck) - if before.ServiceID == "" { + case change.Deleted(), change.Created(): + obj := changeObject(change).(*structs.HealthCheck) + if obj.ServiceID == "" { // Node level check - markNode(before.Node, changeIndirect) + markNode(obj.Node, changeIndirect) } else { - markService(before.Node, before.ServiceID, before.EnterpriseMeta, nil) - } - - case change.Created(): - after := change.After.(*structs.HealthCheck) - if after.ServiceID == "" { - // Node level check - markNode(after.Node, changeIndirect) - } else { - markService(after.Node, after.ServiceID, after.EnterpriseMeta, nil) + markService(obj.Node, obj.ServiceID, obj.EnterpriseMeta, nil) } } } @@ -175,7 +165,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event continue } // Rebuild events for all services on this node - es, err := serviceHealthEventsForNode(tx, changes.Index, node) + es, err := newServiceHealthEventsForNode(tx, changes.Index, node) if err != nil { return nil, err } @@ -187,13 +177,9 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // like a change to checks but it didn't actually change the service // record itself. if change != nil && change.Deleted() { - // Generate delete event for the service instance and append it sn := change.Before.(*structs.ServiceNode) - es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, sn, &tuple.EntMeta) - if err != nil { - return nil, err - } - events = append(events, es...) 
+ e := newServiceHealthEventDeregister(changes.Index, sn) + events = append(events, e) continue } @@ -209,11 +195,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // go out to subscribers to the new service name topic key, but we need // to fix up subscribers that were watching the old name by sending // deregistrations. - es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, before, &tuple.EntMeta) - if err != nil { - return nil, err - } - events = append(events, es...) + e := newServiceHealthEventDeregister(changes.Index, before) + events = append(events, e) } if before.ServiceKind == structs.ServiceKindConnectProxy && @@ -223,16 +206,10 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // to deregister this sidecar service though as it still exists and // didn't change its name (or if it did that was caught just above). But // our mechanism for connect events is to convert them so we generate - // the regular one, convert it to Connect topic and then discar the + // the regular one, convert it to Connect topic and then discard the // original. - es, err := serviceHealthDeregEventsForServiceInstance(changes.Index, before, &tuple.EntMeta) - if err != nil { - return nil, err - } - // Don't append es per comment above, but convert it to connect topic - // events. - es = serviceHealthToConnectEvents(es) - events = append(events, es...) + e := newServiceHealthEventDeregister(changes.Index, before) + events = append(events, serviceHealthToConnectEvents(e)...) } } @@ -242,16 +219,16 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event continue } // Build service event and append it - es, err := serviceHealthEventsForServiceInstance(tx, changes.Index, tuple) + e, err := newServiceHealthEventForService(tx, changes.Index, tuple) if err != nil { return nil, err } - events = append(events, es...) 
+ events = append(events, e) } // Duplicate any events that affected connect-enabled instances (proxies or // native apps) to the relevant Connect topic. - events = append(events, serviceHealthToConnectEvents(events)...) + events = append(events, serviceHealthToConnectEvents(events...)...) return events, nil } @@ -284,7 +261,7 @@ func changeTypeFromChange(change memdb.Change) changeType { // enabled and so of no interest to those subscribers but also involves // switching connection details to be the proxy instead of the actual instance // in case of a sidecar. -func serviceHealthToConnectEvents(events []stream.Event) []stream.Event { +func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event { serviceHealthConnectEvents := make([]stream.Event, 0, len(events)) for _, event := range events { if event.Topic != TopicServiceHealth { @@ -292,6 +269,7 @@ func serviceHealthToConnectEvents(events []stream.Event) []stream.Event { continue } node := getPayloadCheckServiceNode(event.Payload) + // TODO: do we need to handle gateways here as well? if node.Service == nil || (node.Service.Kind != structs.ServiceKindConnectProxy && !node.Service.Connect.Native) { // Event is not a service instance (i.e. just a node registration) @@ -325,11 +303,11 @@ func getPayloadCheckServiceNode(payload interface{}) *structs.CheckServiceNode { return csn } -// serviceHealthEventsForNode returns health events for all services on the +// newServiceHealthEventsForNode returns health events for all services on the // given node. This mirrors some of the the logic in the oddly-named // parseCheckServiceNodes but is more efficient since we know they are all on // the same node. -func serviceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) { +func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) { // TODO(namespace-streaming): figure out the right EntMeta and mystery arg. 
services, err := catalogServiceListByNode(tx, node, nil, false) if err != nil { @@ -345,13 +323,8 @@ func serviceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.E for service := services.Next(); service != nil; service = services.Next() { sn := service.(*structs.ServiceNode) - es, err := serviceHealthEventsForServiceNodeInternal(idx, n, sn, nodeChecks, svcChecks) - if err != nil { - return nil, err - } - - // Append to the results. - events = append(events, es...) + event := newServiceHealthEventRegister(idx, n, sn, nodeChecks, svcChecks) + events = append(events, event) } return events, nil @@ -396,31 +369,31 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, return n, nodeChecks, svcChecks, nil } -func serviceHealthEventsForServiceInstance(tx ReadTxn, idx uint64, tuple nodeServiceTuple) ([]stream.Event, error) { +func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) { n, nodeChecks, svcChecks, err := getNodeAndChecks(tx, tuple.Node) if err != nil { - return nil, err + return stream.Event{}, err } svc, err := getCompoundWithTxn(tx, "services", "id", &tuple.EntMeta, tuple.Node, tuple.ServiceID) if err != nil { - return nil, err + return stream.Event{}, err } sn := svc.Next() if sn == nil { - return nil, ErrMissingService + return stream.Event{}, ErrMissingService } - return serviceHealthEventsForServiceNodeInternal(idx, n, sn.(*structs.ServiceNode), nodeChecks, svcChecks) + return newServiceHealthEventRegister(idx, n, sn.(*structs.ServiceNode), nodeChecks, svcChecks), nil } -func serviceHealthEventsForServiceNodeInternal(idx uint64, +func newServiceHealthEventRegister(idx uint64, node *structs.Node, sn *structs.ServiceNode, nodeChecks structs.HealthChecks, - svcChecks map[string]structs.HealthChecks) ([]stream.Event, error) { - + svcChecks map[string]structs.HealthChecks, +) stream.Event { // Start with a copy of the node checks. 
checks := nodeChecks for _, check := range svcChecks[sn.ServiceID] { @@ -432,7 +405,7 @@ func serviceHealthEventsForServiceNodeInternal(idx uint64, Service: sn.ToNodeService(), Checks: checks, } - e := stream.Event{ + return stream.Event{ Topic: TopicServiceHealth, Key: sn.ServiceName, Index: idx, @@ -441,16 +414,9 @@ func serviceHealthEventsForServiceNodeInternal(idx uint64, Obj: csn, }, } - - // See if we also need to emit a connect event (i.e. if this instance is a - // connect proxy or connect native app). - - return []stream.Event{e}, nil } -func serviceHealthDeregEventsForServiceInstance(idx uint64, - sn *structs.ServiceNode, entMeta *structs.EnterpriseMeta) ([]stream.Event, error) { - +func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream.Event { // We actually only need the node name populated in the node part as it's only // used as a key to know which service was deregistered so don't bother looking // up the node in the DB. Note that while the ServiceNode does have NodeID @@ -466,7 +432,7 @@ func serviceHealthDeregEventsForServiceInstance(idx uint64, Service: sn.ToNodeService(), } - e := stream.Event{ + return stream.Event{ Topic: TopicServiceHealth, Key: sn.ServiceName, Index: idx, @@ -475,5 +441,4 @@ func serviceHealthDeregEventsForServiceInstance(idx uint64, Obj: csn, }, } - return []stream.Event{e}, nil } From 5f52220f53d595bc30adb8375263bbc6b4f44199 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Tue, 21 Jul 2020 21:33:50 -0400 Subject: [PATCH 41/73] state: fix a bug in building service health events The nodeCheck slice was being used as the first arg in append, which in some cases will modify the array backing the slice. This would lead to service checks for other services in the wrong event. Also refactor some things to reduce the arguments to functions. 
--- agent/consul/state/catalog_events.go | 53 +++++++++++++++------------- agent/consul/state/memdb.go | 1 - 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index a08499b11..b72b7288c 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -314,7 +314,7 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea return nil, err } - n, nodeChecks, svcChecks, err := getNodeAndChecks(tx, node) + n, checksFunc, err := getNodeAndChecks(tx, node) if err != nil { return nil, err } @@ -323,33 +323,30 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea for service := services.Next(); service != nil; service = services.Next() { sn := service.(*structs.ServiceNode) - event := newServiceHealthEventRegister(idx, n, sn, nodeChecks, svcChecks) + event := newServiceHealthEventRegister(idx, n, sn, checksFunc(sn.ServiceID)) events = append(events, event) } return events, nil } -// getNodeAndNodeChecks returns a specific node and ALL checks on that node -// (both node specific and service-specific). node-level Checks are returned as -// a slice, service-specific checks as a map of slices with the service id as -// the map key. -func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, - structs.HealthChecks, map[string]structs.HealthChecks, error) { +// getNodeAndNodeChecks returns a the node structure and a function that returns +// the full list of checks for a specific service on that node. 
+func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) { // Fetch the node nodeRaw, err := tx.First("nodes", "id", node) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if nodeRaw == nil { - return nil, nil, nil, ErrMissingNode + return nil, nil, ErrMissingNode } n := nodeRaw.(*structs.Node) // TODO(namespace-streaming): work out what EntMeta is needed here, wildcard? iter, err := catalogListChecksByNode(tx, node, nil) if err != nil { - return nil, nil, nil, err + return nil, nil, err } var nodeChecks structs.HealthChecks @@ -366,11 +363,22 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, svcChecks[check.ServiceID] = append(svcChecks[check.ServiceID], check) } } - return n, nodeChecks, svcChecks, nil + serviceChecks := func(serviceID string) structs.HealthChecks { + // Create a new slice so that append does not modify the array backing nodeChecks. + result := make(structs.HealthChecks, 0, len(nodeChecks)) + result = append(result, nodeChecks...) 
+ for _, check := range svcChecks[serviceID] { + result = append(result, check) + } + return result + } + return n, serviceChecks, nil } +type serviceChecksFunc func(serviceID string) structs.HealthChecks + func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) { - n, nodeChecks, svcChecks, err := getNodeAndChecks(tx, tuple.Node) + n, checksFunc, err := getNodeAndChecks(tx, tuple.Node) if err != nil { return stream.Event{}, err } @@ -380,26 +388,21 @@ func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTu return stream.Event{}, err } - sn := svc.Next() - if sn == nil { + raw := svc.Next() + if raw == nil { return stream.Event{}, ErrMissingService } - return newServiceHealthEventRegister(idx, n, sn.(*structs.ServiceNode), nodeChecks, svcChecks), nil + sn := raw.(*structs.ServiceNode) + return newServiceHealthEventRegister(idx, n, sn, checksFunc(sn.ServiceID)), nil } -func newServiceHealthEventRegister(idx uint64, +func newServiceHealthEventRegister( + idx uint64, node *structs.Node, sn *structs.ServiceNode, - nodeChecks structs.HealthChecks, - svcChecks map[string]structs.HealthChecks, + checks structs.HealthChecks, ) stream.Event { - // Start with a copy of the node checks. 
- checks := nodeChecks - for _, check := range svcChecks[sn.ServiceID] { - checks = append(checks, check) - } - csn := &structs.CheckServiceNode{ Node: node, Service: sn.ToNodeService(), diff --git a/agent/consul/state/memdb.go b/agent/consul/state/memdb.go index d16faa651..3fd72dfaa 100644 --- a/agent/consul/state/memdb.go +++ b/agent/consul/state/memdb.go @@ -189,7 +189,6 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { return events, nil } -// TODO: could accept a ReadTxner instead of a Store func newSnapshotHandlers(s *Store) stream.SnapshotHandlers { return stream.SnapshotHandlers{ TopicServiceHealth: serviceHealthSnapshot(s, TopicServiceHealth), From 68682e7e83b92a98caee52c892d76c524d1967a7 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 22 Jul 2020 18:01:40 -0400 Subject: [PATCH 42/73] don't over allocate slice --- agent/consul/state/catalog_events.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index b72b7288c..8e3e730b1 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -262,7 +262,7 @@ func changeTypeFromChange(change memdb.Change) changeType { // switching connection details to be the proxy instead of the actual instance // in case of a sidecar. 
func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event { - serviceHealthConnectEvents := make([]stream.Event, 0, len(events)) + var serviceHealthConnectEvents []stream.Event for _, event := range events { if event.Topic != TopicServiceHealth { // Skip non-health or any events already emitted to Connect topic From 870823e8ed24d25b027746099dcea9184577e0a5 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 22 Jul 2020 18:41:22 -0400 Subject: [PATCH 43/73] state: use changeType in serviceChanges To be a little more explicit, instead of nil implying an indirect change --- agent/consul/state/catalog_events.go | 60 ++++++++++++++++++---------- 1 file changed, 39 insertions(+), 21 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 8e3e730b1..0e8766fe1 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -63,13 +63,36 @@ type nodeServiceTuple struct { EntMeta structs.EnterpriseMeta } +func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple { + return nodeServiceTuple{ + Node: sn.Node, + ServiceID: sn.ServiceID, + EntMeta: sn.EnterpriseMeta, + } +} + +func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServiceTuple { + return nodeServiceTuple{ + Node: hc.Node, + ServiceID: hc.ServiceID, + EntMeta: hc.EnterpriseMeta, + } +} + +type serviceChange struct { + changeType changeType + change memdb.Change +} + +var serviceChangeIndirect = serviceChange{changeType: changeIndirect} + // ServiceHealthEventsFromChanges returns all the service and Connect health // events that should be emitted given a set of changes to the state store. 
func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) { var events []stream.Event var nodeChanges map[string]changeType - var serviceChanges map[nodeServiceTuple]*memdb.Change + var serviceChanges map[nodeServiceTuple]serviceChange markNode := func(node string, typ changeType) { if nodeChanges == nil { @@ -83,21 +106,16 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event nodeChanges[node] = typ } } - markService := func(node, service string, entMeta structs.EnterpriseMeta, svcChange *memdb.Change) { + markService := func(key nodeServiceTuple, svcChange serviceChange) { if serviceChanges == nil { - serviceChanges = make(map[nodeServiceTuple]*memdb.Change) - } - k := nodeServiceTuple{ - Node: node, - ServiceID: service, - EntMeta: entMeta, + serviceChanges = make(map[nodeServiceTuple]serviceChange) } // If the caller has an actual service mutation ensure we store it even if // the service is already marked. If the caller is just marking the service // dirty without an node change, don't overwrite any existing node change we // know about. - if serviceChanges[k] == nil { - serviceChanges[k] = svcChange + if serviceChanges[key].changeType == changeIndirect { + serviceChanges[key] = svcChange } } @@ -114,8 +132,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event case "services": sn := changeObject(change).(*structs.ServiceNode) - changeCopy := change // TODO: why does the change need to be copied? 
- markService(sn.Node, sn.ServiceID, sn.EnterpriseMeta, &changeCopy) + srvChange := serviceChange{changeType: changeTypeFromChange(change), change: change} + markService(newNodeServiceTupleFromServiceNode(sn), srvChange) case "checks": // For health we only care about the scope for now to know if it's just @@ -133,14 +151,14 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } else { // Check changed which means we just need to emit for the linked // service. - markService(after.Node, after.ServiceID, after.EnterpriseMeta, nil) + markService(newNodeServiceTupleFromServiceHealthCheck(after), serviceChangeIndirect) // Edge case - if the check with same ID was updated to link to a // different service ID but the old service with old ID still exists, // then the old service instance needs updating too as it has one // fewer checks now. if before.ServiceID != after.ServiceID { - markService(before.Node, before.ServiceID, before.EnterpriseMeta, nil) + markService(newNodeServiceTupleFromServiceHealthCheck(before), serviceChangeIndirect) } } @@ -150,7 +168,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // Node level check markNode(obj.Node, changeIndirect) } else { - markService(obj.Node, obj.ServiceID, obj.EnterpriseMeta, nil) + markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect) } } } @@ -172,12 +190,12 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event events = append(events, es...) } - for tuple, change := range serviceChanges { + for tuple, srvChange := range serviceChanges { // change may be nil if there was a change that _affected_ the service // like a change to checks but it didn't actually change the service // record itself. 
- if change != nil && change.Deleted() { - sn := change.Before.(*structs.ServiceNode) + if srvChange.changeType == changeDelete { + sn := srvChange.change.Before.(*structs.ServiceNode) e := newServiceHealthEventDeregister(changes.Index, sn) events = append(events, e) continue @@ -186,9 +204,9 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event // Check if this was a service mutation that changed it's name which // requires special handling even if node changed and new events were // already published. - if change != nil && change.Updated() { - before := change.Before.(*structs.ServiceNode) - after := change.After.(*structs.ServiceNode) + if srvChange.changeType == changeUpdate { + before := srvChange.change.Before.(*structs.ServiceNode) + after := srvChange.change.After.(*structs.ServiceNode) if before.ServiceName != after.ServiceName { // Service was renamed, the code below will ensure the new registrations From b241debee7b2e95c542c5ba0822133b70aeb8249 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Fri, 7 Aug 2020 13:00:39 -0400 Subject: [PATCH 44/73] state: improve comments in catalog_events.go Co-authored-by: Paul Banks --- agent/consul/state/catalog_events.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 0e8766fe1..2e6148f73 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -49,7 +49,8 @@ func serviceHealthSnapshot(s *Store, topic topic) stream.SnapshotFunc { event.Key = n.Service.Service } - // TODO: could all the events be appended as a single item? + // append each event as a separate item so that they can be serialized + // separately, to prevent the encoding of one massive message. 
buf.Append([]stream.Event{event}) } @@ -100,7 +101,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } // If the caller has an actual node mutation ensure we store it even if the // node is already marked. If the caller is just marking the node dirty - // without an node change, don't overwrite any existing node change we know + // without a node change, don't overwrite any existing node change we know // about. if nodeChanges[node] == changeIndirect { nodeChanges[node] = typ @@ -112,7 +113,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } // If the caller has an actual service mutation ensure we store it even if // the service is already marked. If the caller is just marking the service - // dirty without an node change, don't overwrite any existing node change we + // dirty without a service change, don't overwrite any existing service change we // know about. if serviceChanges[key].changeType == changeIndirect { serviceChanges[key] = svcChange @@ -146,7 +147,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event before := change.Before.(*structs.HealthCheck) after := change.After.(*structs.HealthCheck) if after.ServiceID == "" || before.ServiceID == "" { - // Either changed from or to being node-scoped + // check before and/or after is node-scoped markNode(after.Node, changeIndirect) } else { // Check changed which means we just need to emit for the linked From c17a5b0628745c0c19121a124dc31dd238070193 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Fri, 7 Aug 2020 14:18:24 -0400 Subject: [PATCH 45/73] state: handle terminating gateways in service health events --- agent/consul/state/catalog_events.go | 55 ++++++++++++++++------------ 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 2e6148f73..b42d47fc6 100644 --- a/agent/consul/state/catalog_events.go +++ 
b/agent/consul/state/catalog_events.go @@ -218,17 +218,8 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event events = append(events, e) } - if before.ServiceKind == structs.ServiceKindConnectProxy && - before.ServiceProxy.DestinationServiceName != after.ServiceProxy.DestinationServiceName { - // Connect proxy changed the service it is representing, need to issue a - // dereg for the old service on the Connect topic. We don't actually need - // to deregister this sidecar service though as it still exists and - // didn't change its name (or if it did that was caught just above). But - // our mechanism for connect events is to convert them so we generate - // the regular one, convert it to Connect topic and then discard the - // original. - e := newServiceHealthEventDeregister(changes.Index, before) - events = append(events, serviceHealthToConnectEvents(e)...) + if e, ok := isConnectProxyDestinationServiceChange(changes.Index, before, after); ok { + events = append(events, e) } } @@ -252,6 +243,22 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event return events, nil } +// isConnectProxyDestinationServiceChange handles the case where a Connect proxy changed +// the service it is proxying. We need to issue a de-registration for the old +// service on the Connect topic. We don't actually need to deregister this sidecar +// service though as it still exists and didn't change its name. 
+func isConnectProxyDestinationServiceChange(idx uint64, before, after *structs.ServiceNode) (stream.Event, bool) { + if before.ServiceKind != structs.ServiceKindConnectProxy || + before.ServiceProxy.DestinationServiceName == after.ServiceProxy.DestinationServiceName { + return stream.Event{}, false + } + + e := newServiceHealthEventDeregister(idx, before) + e.Topic = TopicServiceHealthConnect + e.Key = getPayloadCheckServiceNode(e.Payload).Service.Proxy.DestinationServiceName + return e, true +} + type changeType uint8 const ( @@ -281,33 +288,35 @@ func changeTypeFromChange(change memdb.Change) changeType { // switching connection details to be the proxy instead of the actual instance // in case of a sidecar. func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event { - var serviceHealthConnectEvents []stream.Event + var result []stream.Event for _, event := range events { if event.Topic != TopicServiceHealth { // Skip non-health or any events already emitted to Connect topic continue } node := getPayloadCheckServiceNode(event.Payload) - // TODO: do we need to handle gateways here as well? - if node.Service == nil || - (node.Service.Kind != structs.ServiceKindConnectProxy && !node.Service.Connect.Native) { - // Event is not a service instance (i.e. just a node registration) - // or is not a service that is not connect-enabled in some way. + if node.Service == nil { continue } connectEvent := event connectEvent.Topic = TopicServiceHealthConnect - // If this is a proxy, set the key to the destination service name. 
- if node.Service.Kind == structs.ServiceKindConnectProxy { - connectEvent.Key = node.Service.Proxy.DestinationServiceName - } + switch { + case node.Service.Connect.Native: + result = append(result, connectEvent) - serviceHealthConnectEvents = append(serviceHealthConnectEvents, connectEvent) + case node.Service.Kind == structs.ServiceKindConnectProxy: + connectEvent.Key = node.Service.Proxy.DestinationServiceName + result = append(result, connectEvent) + + default: + // ServiceKindTerminatingGateway changes are handled separately. + // All other cases are not relevant to the connect topic + } } - return serviceHealthConnectEvents + return result } func getPayloadCheckServiceNode(payload interface{}) *structs.CheckServiceNode { From 4802c5fd895c18fefad206dd80cf2766b1d10052 Mon Sep 17 00:00:00 2001 From: Alvin Huang <17609145+alvin-huang@users.noreply.github.com> Date: Thu, 3 Sep 2020 23:40:23 -0400 Subject: [PATCH 46/73] switch to new aws account s3 bucket for dev artifacts (#8612) --- .circleci/config.yml | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index adb1cf6cd..a2fc6be8a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -19,7 +19,7 @@ references: EMAIL: noreply@hashicorp.com GIT_AUTHOR_NAME: circleci-consul GIT_COMMITTER_NAME: circleci-consul - S3_ARTIFACT_BUCKET: consul-dev-artifacts + S3_ARTIFACT_BUCKET: consul-dev-artifacts-v2 BASH_ENV: .circleci/bash_env.sh VAULT_BINARY_VERSION: 1.2.2 @@ -33,6 +33,27 @@ steps: curl -sSL "${url}/v${GOTESTSUM_RELEASE}/gotestsum_${GOTESTSUM_RELEASE}_linux_amd64.tar.gz" | \ sudo tar -xz --overwrite -C /usr/local/bin gotestsum + get-aws-cli: &get-aws-cli + run: + name: download and install AWS CLI + command: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + echo -e "${AWS_CLI_GPG_KEY}" | gpg --import + curl -o awscliv2.sig 
https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip.sig + gpg --verify awscliv2.sig awscliv2.zip + unzip awscliv2.zip + sudo ./aws/install + + aws-assume-role: &aws-assume-role + run: + name: assume-role aws creds + command: | + # assume role has duration of 15 min (the minimum allowed) + CREDENTIALS="$(aws sts assume-role --duration-seconds 900 --role-arn ${ROLE_ARN} --role-session-name build-${CIRCLE_SHA1} | jq '.Credentials')" + echo "export AWS_ACCESS_KEY_ID=$(echo $CREDENTIALS | jq -r '.AccessKeyId')" >> $BASH_ENV + echo "export AWS_SECRET_ACCESS_KEY=$(echo $CREDENTIALS | jq -r '.SecretAccessKey')" >> $BASH_ENV + echo "export AWS_SESSION_TOKEN=$(echo $CREDENTIALS | jq -r '.SessionToken')" >> $BASH_ENV + # This step MUST be at the end of any set of steps due to the 'when' condition notify-slack-failure: ¬ify-slack-failure name: notify-slack-failure @@ -389,13 +410,12 @@ jobs: # upload development build to s3 dev-upload-s3: docker: - - image: circleci/python:stretch + - image: *GOLANG_IMAGE environment: <<: *ENVIRONMENT steps: - - run: - name: Install awscli - command: sudo pip install awscli + - *get-aws-cli + - *aws-assume-role # get consul binary - attach_workspace: at: bin/ From 51f079dcdde4f0c30620f5a5a5e1ac723050dace Mon Sep 17 00:00:00 2001 From: Hans Hasselberg Date: Fri, 4 Sep 2020 11:47:16 +0200 Subject: [PATCH 47/73] secondaryIntermediateCertRenewalWatch abort on success (#8588) secondaryIntermediateCertRenewalWatch was using `retryLoopBackoff` to renew the intermediate certificate. Once it entered the inner loop and started `retryLoopBackoff` it would never leave that. `retryLoopBackoffAbortOnSuccess` will return when renewing is successful, like it was intended originally. 
--- agent/consul/leader_connect.go | 12 ++++++++- agent/consul/leader_connect_test.go | 41 +++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/agent/consul/leader_connect.go b/agent/consul/leader_connect.go index fcad53d65..3a7a1ce97 100644 --- a/agent/consul/leader_connect.go +++ b/agent/consul/leader_connect.go @@ -653,7 +653,7 @@ func (s *Server) secondaryIntermediateCertRenewalWatch(ctx context.Context) erro case <-ctx.Done(): return nil case <-time.After(structs.IntermediateCertRenewInterval): - retryLoopBackoff(ctx, func() error { + retryLoopBackoffAbortOnSuccess(ctx, func() error { s.caProviderReconfigurationLock.Lock() defer s.caProviderReconfigurationLock.Unlock() @@ -835,6 +835,14 @@ func (s *Server) replicateIntentions(ctx context.Context) error { // retryLoopBackoff loops a given function indefinitely, backing off exponentially // upon errors up to a maximum of maxRetryBackoff seconds. func retryLoopBackoff(ctx context.Context, loopFn func() error, errFn func(error)) { + retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, false) +} + +func retryLoopBackoffAbortOnSuccess(ctx context.Context, loopFn func() error, errFn func(error)) { + retryLoopBackoffHandleSuccess(ctx, loopFn, errFn, true) +} + +func retryLoopBackoffHandleSuccess(ctx context.Context, loopFn func() error, errFn func(error), abortOnSuccess bool) { var failedAttempts uint limiter := rate.NewLimiter(loopRateLimit, retryBucketSize) for { @@ -861,6 +869,8 @@ func retryLoopBackoff(ctx context.Context, loopFn func() error, errFn func(error case <-timer.C: continue } + } else if abortOnSuccess { + return } // Reset the failed attempts after a successful run. 
diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 0f8edc470..c4852d57a 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -1,6 +1,7 @@ package consul import ( + "context" "crypto/x509" "fmt" "io/ioutil" @@ -1442,3 +1443,43 @@ func TestLeader_lessThanHalfTimePassed(t *testing.T) { require.True(t, lessThanHalfTimePassed(now, now.Add(-10*time.Second), now.Add(20*time.Second))) } + +func TestLeader_retryLoopBackoffHandleSuccess(t *testing.T) { + type test struct { + desc string + loopFn func() error + abort bool + timedOut bool + } + success := func() error { + return nil + } + failure := func() error { + return fmt.Errorf("test error") + } + tests := []test{ + {"loop without error and no abortOnSuccess keeps running", success, false, true}, + {"loop with error and no abortOnSuccess keeps running", failure, false, true}, + {"loop without error and abortOnSuccess is stopped", success, true, false}, + {"loop with error and abortOnSuccess keeps running", failure, true, true}, + } + for _, tc := range tests { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + retryLoopBackoffHandleSuccess(ctx, tc.loopFn, func(_ error) {}, tc.abort) + select { + case <-ctx.Done(): + if !tc.timedOut { + t.Fatal("should not have timed out") + } + default: + if tc.timedOut { + t.Fatal("should have timed out") + } + } + }) + } +} From b57a5b4911f095c12c28060ff974fe84c0d7d208 Mon Sep 17 00:00:00 2001 From: Alvin Huang <17609145+alvin-huang@users.noreply.github.com> Date: Fri, 4 Sep 2020 10:42:18 -0400 Subject: [PATCH 48/73] add checkout to dev-upload-s3 (#8617) --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index a2fc6be8a..06bcda1f4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -414,6 +414,7 @@ jobs: environment: <<: 
*ENVIRONMENT steps: + - checkout - *get-aws-cli - *aws-assume-role # get consul binary From a14a31ccf101300228128f4fd830aefa894db403 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Fri, 4 Sep 2020 11:24:11 -0500 Subject: [PATCH 49/73] sdk: also print test agent logs in verbose mode (#8616) --- sdk/testutil/testlog.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/testutil/testlog.go b/sdk/testutil/testlog.go index a16396cb8..9e9ea0c99 100644 --- a/sdk/testutil/testlog.go +++ b/sdk/testutil/testlog.go @@ -25,8 +25,8 @@ func LoggerWithOutput(t testing.TB, output io.Writer) hclog.InterceptLogger { var sendTestLogsToStdout = os.Getenv("NOLOGBUFFER") == "1" // NewLogBuffer returns an io.Writer which buffers all writes. When the test -// ends, t.Failed is checked. If the test has failed all log output is printed -// to stdout. +// ends, t.Failed is checked. If the test has failed or has been run in verbose +// mode all log output is printed to stdout. // // Set the env var NOLOGBUFFER=1 to disable buffering, resulting in all log // output being written immediately to stdout. 
@@ -36,7 +36,7 @@ func NewLogBuffer(t CleanupT) io.Writer { } buf := &logBuffer{buf: new(bytes.Buffer)} t.Cleanup(func() { - if t.Failed() { + if t.Failed() || testing.Verbose() { buf.Lock() defer buf.Unlock() buf.buf.WriteTo(os.Stdout) From cfc4283c604bef197e4094b35335aa24983dd650 Mon Sep 17 00:00:00 2001 From: Freddy Date: Fri, 4 Sep 2020 13:38:26 -0600 Subject: [PATCH 50/73] Make LockDelay configurable in api locks (#8621) --- api/lock.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/lock.go b/api/lock.go index 5cacee8f7..221a7add3 100644 --- a/api/lock.go +++ b/api/lock.go @@ -79,6 +79,7 @@ type LockOptions struct { MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime LockTryOnce bool // Optional, defaults to false which means try forever + LockDelay time.Duration // Optional, defaults to 15s Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace } @@ -351,8 +352,9 @@ func (l *Lock) createSession() (string, error) { se := l.opts.SessionOpts if se == nil { se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + LockDelay: l.opts.LockDelay, } } w := WriteOptions{Namespace: l.opts.Namespace} From 581f19b9d4ec3861216bf8f80e2971fad3643dcc Mon Sep 17 00:00:00 2001 From: Freddy Date: Fri, 4 Sep 2020 14:07:57 -0600 Subject: [PATCH 51/73] Adds changelog entry for snapshot agent improvement (#8622) --- .changelog/_8621.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/_8621.txt diff --git a/.changelog/_8621.txt b/.changelog/_8621.txt new file mode 100644 index 000000000..194633f37 --- /dev/null +++ b/.changelog/_8621.txt @@ -0,0 +1,3 @@ +```release-note:improvement +snapshot agent: Deregister critical snapshotting TTL check if leadership is transferred. 
+``` \ No newline at end of file From 99b822c4b3a039465de8eb81ece127cc9c58842e Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Sun, 6 Sep 2020 11:27:39 -0500 Subject: [PATCH 52/73] api: create fresh http client for unix sockets (#8602) Co-authored-by: Matt Keeler --- .changelog/8602.txt | 3 +++ api/api.go | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .changelog/8602.txt diff --git a/.changelog/8602.txt b/.changelog/8602.txt new file mode 100644 index 000000000..49f2c74ce --- /dev/null +++ b/.changelog/8602.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Allow for the client to use TLS over a Unix domain socket. +``` diff --git a/api/api.go b/api/api.go index 7b00be967..38a4e98fb 100644 --- a/api/api.go +++ b/api/api.go @@ -607,9 +607,11 @@ func NewClient(config *Config) (*Client, error) { trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial("unix", parts[1]) } - config.HttpClient = &http.Client{ - Transport: trans, + httpClient, err := NewHttpClient(trans, config.TLSConfig) + if err != nil { + return nil, err } + config.HttpClient = httpClient default: return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) } From 6dbb5f32349748b8157beb4083fafa1075b03ad0 Mon Sep 17 00:00:00 2001 From: Tim Arenz Date: Tue, 8 Sep 2020 12:16:16 +0200 Subject: [PATCH 53/73] Add support for -ca-path option in the connect envoy command (#8606) * Add support for -ca-path option in the connect envoy command * Adding changelog entry --- .changelog/8606.txt | 3 + command/connect/envoy/envoy.go | 12 +- command/connect/envoy/envoy_test.go | 40 ++++++ .../envoy/testdata/existing-ca-path.golden | 125 ++++++++++++++++++ tlsutil/config.go | 4 +- tlsutil/config_test.go | 6 +- 6 files changed, 178 insertions(+), 12 deletions(-) create mode 100644 .changelog/8606.txt create mode 100644 command/connect/envoy/testdata/existing-ca-path.golden diff --git a/.changelog/8606.txt b/.changelog/8606.txt new file mode 
100644 index 000000000..a899232f2 --- /dev/null +++ b/.changelog/8606.txt @@ -0,0 +1,3 @@ +```release-note:bug +connect: `connect envoy` command now respects the `-ca-path` flag +``` diff --git a/command/connect/envoy/envoy.go b/command/connect/envoy/envoy.go index 2fe0c978b..67530f0ec 100644 --- a/command/connect/envoy/envoy.go +++ b/command/connect/envoy/envoy.go @@ -4,7 +4,6 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "net" "os" "os/exec" @@ -20,6 +19,7 @@ import ( proxyCmd "github.com/hashicorp/consul/command/connect/proxy" "github.com/hashicorp/consul/command/flags" "github.com/hashicorp/consul/ipaddr" + "github.com/hashicorp/consul/tlsutil" ) func New(ui cli.Ui) *cmd { @@ -443,13 +443,11 @@ func (c *cmd) templateArgs() (*BootstrapTplArgs, error) { } var caPEM string - if httpCfg.TLSConfig.CAFile != "" { - content, err := ioutil.ReadFile(httpCfg.TLSConfig.CAFile) - if err != nil { - return nil, fmt.Errorf("Failed to read CA file: %s", err) - } - caPEM = strings.Replace(string(content), "\n", "\\n", -1) + pems, err := tlsutil.LoadCAs(httpCfg.TLSConfig.CAFile, httpCfg.TLSConfig.CAPath) + if err != nil { + return nil, err } + caPEM = strings.Replace(strings.Join(pems, ""), "\n", "\\n", -1) return &BootstrapTplArgs{ GRPC: grpcAddr, diff --git a/command/connect/envoy/envoy_test.go b/command/connect/envoy/envoy_test.go index d99051b1e..cab2ae9ad 100644 --- a/command/connect/envoy/envoy_test.go +++ b/command/connect/envoy/envoy_test.go @@ -370,6 +370,46 @@ func TestGenerateConfig(t *testing.T) { LocalAgentClusterName: xds.LocalAgentClusterName, }, }, + { + Name: "missing-ca-path", + Flags: []string{"-proxy-id", "test-proxy", "-ca-path", "some/path"}, + WantArgs: BootstrapTplArgs{ + EnvoyVersion: defaultEnvoyVersion, + ProxyCluster: "test-proxy", + ProxyID: "test-proxy", + // Should resolve IP, note this might not resolve the same way + // everywhere which might make this test brittle but not sure what else + // to do. 
+ GRPC: GRPC{ + AgentAddress: "127.0.0.1", + AgentPort: "8502", + }, + }, + WantErr: "lstat some/path: no such file or directory", + }, + { + Name: "existing-ca-path", + Flags: []string{"-proxy-id", "test-proxy", "-ca-path", "../../../test/ca_path/"}, + Env: []string{"CONSUL_HTTP_SSL=1"}, + WantArgs: BootstrapTplArgs{ + EnvoyVersion: defaultEnvoyVersion, + ProxyCluster: "test-proxy", + ProxyID: "test-proxy", + // Should resolve IP, note this might not resolve the same way + // everywhere which might make this test brittle but not sure what else + // to do. + GRPC: GRPC{ + AgentAddress: "127.0.0.1", + AgentPort: "8502", + AgentTLS: true, + }, + AgentCAPEM: `-----BEGIN CERTIFICATE-----\nMIIFADCCAuqgAwIBAgIBATALBgkqhkiG9w0BAQswEzERMA8GA1UEAxMIQ2VydEF1\ndGgwHhcNMTUwNTExMjI0NjQzWhcNMjUwNTExMjI0NjU0WjATMREwDwYDVQQDEwhD\nZXJ0QXV0aDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALcMByyynHsA\n+K4PJwo5+XHygaEZAhPGvHiKQK2Cbc9NDm0ZTzx0rA/dRTZlvouhDyzcJHm+6R1F\nj6zQv7iaSC3qQtJiPnPsfZ+/0XhFZ3fQWMnfDiGbZpF1kJF01ofB6vnsuocFC0zG\naGC+SZiLAzs+QMP3Bebw1elCBIeoN+8NWnRYmLsYIaYGJGBSbNo/lCpLTuinofUn\nL3ehWEGv1INwpHnSVeN0Ml2GFe23d7PUlj/wNIHgUdpUR+KEJxIP3klwtsI3QpSH\nc4VjWdf4aIcka6K3IFuw+K0PUh3xAAPnMpAQOtCZk0AhF5rlvUbevC6jADxpKxLp\nOONmvCTer4LtyNURAoBH52vbK0r/DNcTpPEFV0IP66nXUFgkk0mRKsu8HTb4IOkC\nX3K4mp18EiWUUtrHZAnNct0iIniDBqKK0yhSNhztG6VakVt/1WdQY9Ey3mNtxN1O\nthqWFKdpKUzPKYC3P6PfVpiE7+VbWTLLXba+8BPe8BxWPsVkjJqGSGnCte4COusz\nM8/7bbTgifwJfsepwFtZG53tvwjWlO46Exl30VoDNTaIGvs1fO0GqJlh2A7FN5F2\nS1rS5VYHtPK8QdmUSvyq+7JDBc1HNT5I2zsIQbNcLwDTZ5EsbU6QR7NHDJKxjv/w\nbs3eTXJSSNcFD74wRU10pXjgE5wOFu9TAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIA\nBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQHazgZ3Puiuc6K2LzgcX5b6fAC\nPzAfBgNVHSMEGDAWgBQHazgZ3Puiuc6K2LzgcX5b6fACPzALBgkqhkiG9w0BAQsD\nggIBAEmeNrSUhpHg1I8dtfqu9hCU/6IZThjtcFA+QcPkkMa+Z1k0SOtsgW8MdlcA\ngCf5g5yQZ0DdpWM9nDB6xDIhQdccm91idHgf8wmpEHUj0an4uyn2ESCt8eqrAWf7\nAClYORCASTYfguJCxcfvwtI1uqaOeCxSOdmFay79UVitVsWeonbCRGsVgBDifJxw\nG2oCQqoYAmXPM4J6syk5GHhB1O9MMq+g1+hOx9s+XHyTui9FL4V+
IUO1ygVqEQB5\nPSiRBvcIsajSGVao+vK0gf2XfcXzqr3y3NhBky9rFMp1g+ykb2yWekV4WiROJlCj\nTsWwWZDRyjiGahDbho/XW8JciouHZhJdjhmO31rqW3HdFviCTdXMiGk3GQIzz/Jg\nP+enOaHXoY9lcxzDvY9z1BysWBgNvNrMnVge/fLP9o+a0a0PRIIVl8T0Ef3zeg1O\nCLCSy/1Vae5Tx63ZTFvGFdOSusYkG9rlAUHXZE364JRCKzM9Bz0bM+t+LaO0MaEb\nYoxcXEPU+gB2IvmARpInN3oHexR6ekuYHVTRGdWrdmuHFzc7eFwygRqTFdoCCU+G\nQZEkd+lOEyv0zvQqYg+Jp0AEGz2B2zB53uBVECtn0EqrSdPtRzUBSByXVs6QhSXn\neVmy+z3U3MecP63X6oSPXekqSyZFuegXpNNuHkjNoL4ep2ix\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIEtzCCA5+gAwIBAgIJAIewRMI8OnvTMA0GCSqGSIb3DQEBBQUAMIGYMQswCQYD\nVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHDAa\nBgNVBAoTE0hhc2hpQ29ycCBUZXN0IENlcnQxDDAKBgNVBAsTA0RldjEWMBQGA1UE\nAxMNdGVzdC5pbnRlcm5hbDEgMB4GCSqGSIb3DQEJARYRdGVzdEBpbnRlcm5hbC5j\nb20wHhcNMTQwNDA3MTkwMTA4WhcNMjQwNDA0MTkwMTA4WjCBmDELMAkGA1UEBhMC\nVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRwwGgYDVQQK\nExNIYXNoaUNvcnAgVGVzdCBDZXJ0MQwwCgYDVQQLEwNEZXYxFjAUBgNVBAMTDXRl\nc3QuaW50ZXJuYWwxIDAeBgkqhkiG9w0BCQEWEXRlc3RAaW50ZXJuYWwuY29tMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxrs6JK4NpiOItxrpNR/1ppUU\nmH7p2BgLCBZ6eHdclle9J56i68adt8J85zaqphCfz6VDP58DsFx+N50PZyjQaDsU\nd0HejRqfHRMtg2O+UQkv4Z66+Vo+gc6uGuANi2xMtSYDVTAqqzF48OOPQDgYkzcG\nxcFZzTRFFZt2vPnyHj8cHcaFo/NMNVh7C3yTXevRGNm9u2mrbxCEeiHzFC2WUnvg\nU2jQuC7Fhnl33Zd3B6d3mQH6O23ncmwxTcPUJe6xZaIRrDuzwUcyhLj5Z3faag/f\npFIIcHSiHRfoqHLGsGg+3swId/zVJSSDHr7pJUu7Cre+vZa63FqDaooqvnisrQID\nAQABo4IBADCB/TAdBgNVHQ4EFgQUo/nrOfqvbee2VklVKIFlyQEbuJUwgc0GA1Ud\nIwSBxTCBwoAUo/nrOfqvbee2VklVKIFlyQEbuJWhgZ6kgZswgZgxCzAJBgNVBAYT\nAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEcMBoGA1UE\nChMTSGFzaGlDb3JwIFRlc3QgQ2VydDEMMAoGA1UECxMDRGV2MRYwFAYDVQQDEw10\nZXN0LmludGVybmFsMSAwHgYJKoZIhvcNAQkBFhF0ZXN0QGludGVybmFsLmNvbYIJ\nAIewRMI8OnvTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADa9fV9h\ngjapBlkNmu64WX0Ufub5dsJrdHS8672P30S7ILB7Mk0W8sL65IezRsZnG898yHf9\n2uzmz5OvNTM9K380g7xFlyobSVq+6yqmmSAlA/ptAcIIZT727P5jig/DB7fzJM3g\njctDlEGOmEe50GQXc25VKpcpjAsNQi5E
R5gowQ0v3IXNZs+yU+LvxLHc0rUJ/XSp\nlFCAMOqd5uRoMOejnT51G6krvLNzPaQ3N9jQfNVY4Q0zfs0M+6dRWvqfqB9Vyq8/\nPOLMld+HyAZEBk9zK3ZVIXx6XS4dkDnSNR91njLq7eouf6M7+7s/oMQZZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----\n`, + AdminAccessLogPath: "/dev/null", + AdminBindAddress: "127.0.0.1", + AdminBindPort: "19000", + LocalAgentClusterName: xds.LocalAgentClusterName, + }, + }, { Name: "custom-bootstrap", Flags: []string{"-proxy-id", "test-proxy"}, diff --git a/command/connect/envoy/testdata/existing-ca-path.golden b/command/connect/envoy/testdata/existing-ca-path.golden new file mode 100644 index 000000000..1dd467ce4 --- /dev/null +++ b/command/connect/envoy/testdata/existing-ca-path.golden @@ -0,0 +1,125 @@ +{ + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "127.0.0.1", + "port_value": 19000 + } + } + }, + "node": { + "cluster": "test-proxy", + "id": "test-proxy", + "metadata": { + "namespace": "default", + "envoy_version": "1.15.0" + } + }, + "static_resources": { + "clusters": [ + { + "name": "local_agent", + "connect_timeout": "1s", + "type": "STATIC", + "tls_context": { + "common_tls_context": { + "validation_context": { + "trusted_ca": { + "inline_string": "-----BEGIN 
CERTIFICATE-----\nMIIFADCCAuqgAwIBAgIBATALBgkqhkiG9w0BAQswEzERMA8GA1UEAxMIQ2VydEF1\ndGgwHhcNMTUwNTExMjI0NjQzWhcNMjUwNTExMjI0NjU0WjATMREwDwYDVQQDEwhD\nZXJ0QXV0aDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALcMByyynHsA\n+K4PJwo5+XHygaEZAhPGvHiKQK2Cbc9NDm0ZTzx0rA/dRTZlvouhDyzcJHm+6R1F\nj6zQv7iaSC3qQtJiPnPsfZ+/0XhFZ3fQWMnfDiGbZpF1kJF01ofB6vnsuocFC0zG\naGC+SZiLAzs+QMP3Bebw1elCBIeoN+8NWnRYmLsYIaYGJGBSbNo/lCpLTuinofUn\nL3ehWEGv1INwpHnSVeN0Ml2GFe23d7PUlj/wNIHgUdpUR+KEJxIP3klwtsI3QpSH\nc4VjWdf4aIcka6K3IFuw+K0PUh3xAAPnMpAQOtCZk0AhF5rlvUbevC6jADxpKxLp\nOONmvCTer4LtyNURAoBH52vbK0r/DNcTpPEFV0IP66nXUFgkk0mRKsu8HTb4IOkC\nX3K4mp18EiWUUtrHZAnNct0iIniDBqKK0yhSNhztG6VakVt/1WdQY9Ey3mNtxN1O\nthqWFKdpKUzPKYC3P6PfVpiE7+VbWTLLXba+8BPe8BxWPsVkjJqGSGnCte4COusz\nM8/7bbTgifwJfsepwFtZG53tvwjWlO46Exl30VoDNTaIGvs1fO0GqJlh2A7FN5F2\nS1rS5VYHtPK8QdmUSvyq+7JDBc1HNT5I2zsIQbNcLwDTZ5EsbU6QR7NHDJKxjv/w\nbs3eTXJSSNcFD74wRU10pXjgE5wOFu9TAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIA\nBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQHazgZ3Puiuc6K2LzgcX5b6fAC\nPzAfBgNVHSMEGDAWgBQHazgZ3Puiuc6K2LzgcX5b6fACPzALBgkqhkiG9w0BAQsD\nggIBAEmeNrSUhpHg1I8dtfqu9hCU/6IZThjtcFA+QcPkkMa+Z1k0SOtsgW8MdlcA\ngCf5g5yQZ0DdpWM9nDB6xDIhQdccm91idHgf8wmpEHUj0an4uyn2ESCt8eqrAWf7\nAClYORCASTYfguJCxcfvwtI1uqaOeCxSOdmFay79UVitVsWeonbCRGsVgBDifJxw\nG2oCQqoYAmXPM4J6syk5GHhB1O9MMq+g1+hOx9s+XHyTui9FL4V+IUO1ygVqEQB5\nPSiRBvcIsajSGVao+vK0gf2XfcXzqr3y3NhBky9rFMp1g+ykb2yWekV4WiROJlCj\nTsWwWZDRyjiGahDbho/XW8JciouHZhJdjhmO31rqW3HdFviCTdXMiGk3GQIzz/Jg\nP+enOaHXoY9lcxzDvY9z1BysWBgNvNrMnVge/fLP9o+a0a0PRIIVl8T0Ef3zeg1O\nCLCSy/1Vae5Tx63ZTFvGFdOSusYkG9rlAUHXZE364JRCKzM9Bz0bM+t+LaO0MaEb\nYoxcXEPU+gB2IvmARpInN3oHexR6ekuYHVTRGdWrdmuHFzc7eFwygRqTFdoCCU+G\nQZEkd+lOEyv0zvQqYg+Jp0AEGz2B2zB53uBVECtn0EqrSdPtRzUBSByXVs6QhSXn\neVmy+z3U3MecP63X6oSPXekqSyZFuegXpNNuHkjNoL4ep2ix\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIEtzCCA5+gAwIBAgIJAIewRMI8OnvTMA0GCSqGSIb3DQEBBQUAMIGYMQswCQYD\nVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xHDAa\nBgNVBAoTE0hhc2hpQ29ycCBUZXN0IENlcnQxDDAKBgNVBAsTA0RldjEWMBQGA1UE\nAxMNdGVzdC5pbnRlcm5hbDEgMB4GCSqGSIb3DQEJARYRdGVzdEBpbnRlcm5hbC5j\nb20wHhcNMTQwNDA3MTkwMTA4WhcNMjQwNDA0MTkwMTA4WjCBmDELMAkGA1UEBhMC\nVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRwwGgYDVQQK\nExNIYXNoaUNvcnAgVGVzdCBDZXJ0MQwwCgYDVQQLEwNEZXYxFjAUBgNVBAMTDXRl\nc3QuaW50ZXJuYWwxIDAeBgkqhkiG9w0BCQEWEXRlc3RAaW50ZXJuYWwuY29tMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxrs6JK4NpiOItxrpNR/1ppUU\nmH7p2BgLCBZ6eHdclle9J56i68adt8J85zaqphCfz6VDP58DsFx+N50PZyjQaDsU\nd0HejRqfHRMtg2O+UQkv4Z66+Vo+gc6uGuANi2xMtSYDVTAqqzF48OOPQDgYkzcG\nxcFZzTRFFZt2vPnyHj8cHcaFo/NMNVh7C3yTXevRGNm9u2mrbxCEeiHzFC2WUnvg\nU2jQuC7Fhnl33Zd3B6d3mQH6O23ncmwxTcPUJe6xZaIRrDuzwUcyhLj5Z3faag/f\npFIIcHSiHRfoqHLGsGg+3swId/zVJSSDHr7pJUu7Cre+vZa63FqDaooqvnisrQID\nAQABo4IBADCB/TAdBgNVHQ4EFgQUo/nrOfqvbee2VklVKIFlyQEbuJUwgc0GA1Ud\nIwSBxTCBwoAUo/nrOfqvbee2VklVKIFlyQEbuJWhgZ6kgZswgZgxCzAJBgNVBAYT\nAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEcMBoGA1UE\nChMTSGFzaGlDb3JwIFRlc3QgQ2VydDEMMAoGA1UECxMDRGV2MRYwFAYDVQQDEw10\nZXN0LmludGVybmFsMSAwHgYJKoZIhvcNAQkBFhF0ZXN0QGludGVybmFsLmNvbYIJ\nAIewRMI8OnvTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADa9fV9h\ngjapBlkNmu64WX0Ufub5dsJrdHS8672P30S7ILB7Mk0W8sL65IezRsZnG898yHf9\n2uzmz5OvNTM9K380g7xFlyobSVq+6yqmmSAlA/ptAcIIZT727P5jig/DB7fzJM3g\njctDlEGOmEe50GQXc25VKpcpjAsNQi5ER5gowQ0v3IXNZs+yU+LvxLHc0rUJ/XSp\nlFCAMOqd5uRoMOejnT51G6krvLNzPaQ3N9jQfNVY4Q0zfs0M+6dRWvqfqB9Vyq8/\nPOLMld+HyAZEBk9zK3ZVIXx6XS4dkDnSNR91njLq7eouf6M7+7s/oMQZZRtAfQ6r\nwlW975rYa1ZqEdA=\n-----END CERTIFICATE-----\n" + } + } + } + }, + "http2_protocol_options": {}, + "hosts": [ + { + "socket_address": { + "address": "127.0.0.1", + "port_value": 8502 + } + } + ] + } + ] + }, + "stats_config": { + "stats_tags": [ + { + "regex": 
"^cluster\\.((?:([^.]+)~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.custom_hash" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:([^.]+)\\.)?[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.service_subset" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?([^.]+)\\.[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.service" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.([^.]+)\\.[^.]+\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.namespace" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.([^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.datacenter" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.([^.]+)\\.[^.]+\\.consul\\.)", + "tag_name": "consul.routing_type" + }, + { + "regex": "^cluster\\.((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.([^.]+)\\.consul\\.)", + "tag_name": "consul.trust_domain" + }, + { + "regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+)\\.[^.]+\\.[^.]+\\.consul\\.)", + "tag_name": "consul.target" + }, + { + "regex": "^cluster\\.(((?:[^.]+~)?(?:[^.]+\\.)?[^.]+\\.[^.]+\\.[^.]+\\.[^.]+\\.[^.]+)\\.consul\\.)", + "tag_name": "consul.full_target" + }, + { + "tag_name": "local_cluster", + "fixed_value": "test-proxy" + } + ], + "use_all_default_tags": true + }, + "dynamic_resources": { + "lds_config": { + "ads": {} + }, + "cds_config": { + "ads": {} + }, + "ads_config": { + "api_type": "GRPC", + "grpc_services": { + "initial_metadata": [ + { + "key": "x-consul-token", + "value": "" + } + ], + "envoy_grpc": { + "cluster_name": "local_agent" + } + } + } + }, + "layered_runtime": { + "layers": [ + { + "name": "static_layer", + "static_layer": { + "envoy.deprecated_features:envoy.api.v2.Cluster.tls_context": true, + "envoy.deprecated_features:envoy.config.trace.v2.ZipkinConfig.HTTP_JSON_V1": true, + 
"envoy.deprecated_features:envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.operation_name": true + } + } + ] + } +} diff --git a/tlsutil/config.go b/tlsutil/config.go index 1875dcea6..c966ec724 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -238,7 +238,7 @@ func (c *Configurator) Update(config Config) error { if err != nil { return err } - pems, err := loadCAs(config.CAFile, config.CAPath) + pems, err := LoadCAs(config.CAFile, config.CAPath) if err != nil { return err } @@ -420,7 +420,7 @@ func loadKeyPair(certFile, keyFile string) (*tls.Certificate, error) { return &cert, nil } -func loadCAs(caFile, caPath string) ([]string, error) { +func LoadCAs(caFile, caPath string) ([]string, error) { if caFile == "" && caPath == "" { return nil, nil } diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index 2ef550999..59dbe17f1 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -519,7 +519,7 @@ func TestConfigurator_ErrorPropagation(t *testing.T) { if !v.excludeCheck { cert, err := v.config.KeyPair() require.NoError(t, err, info) - pems, err := loadCAs(v.config.CAFile, v.config.CAPath) + pems, err := LoadCAs(v.config.CAFile, v.config.CAPath) require.NoError(t, err, info) pool, err := pool(pems) require.NoError(t, err, info) @@ -562,7 +562,7 @@ func TestConfigurator_CommonTLSConfigServerNameNodeName(t *testing.T) { } } -func TestConfigurator_loadCAs(t *testing.T) { +func TestConfigurator_LoadCAs(t *testing.T) { type variant struct { cafile, capath string shouldErr bool @@ -579,7 +579,7 @@ func TestConfigurator_loadCAs(t *testing.T) { {"../test/ca/root.cer", "../test/ca_path", false, false, 1}, } for i, v := range variants { - pems, err1 := loadCAs(v.cafile, v.capath) + pems, err1 := LoadCAs(v.cafile, v.capath) pool, err2 := pool(pems) info := fmt.Sprintf("case %d", i) if v.shouldErr { From b0f08fe8e2b467640ca1b6da77456f33a584c471 Mon Sep 17 00:00:00 2001 From: Kevin Pruett Date: Tue, 1 Sep 2020 
11:59:08 -0400 Subject: [PATCH 54/73] Integrate @hashicorp/react-search into layout --- website/components/search-bar/index.jsx | 28 ++++++ website/components/search-bar/style.css | 4 + website/layouts/api.jsx | 19 +++- website/layouts/docs.jsx | 19 +++- website/layouts/intro.jsx | 19 +++- website/package-lock.json | 65 ++++++++++++ website/package.json | 9 +- website/pages/style.css | 42 ++++---- website/scripts/index_search_content.js | 126 +----------------------- 9 files changed, 165 insertions(+), 166 deletions(-) create mode 100644 website/components/search-bar/index.jsx create mode 100644 website/components/search-bar/style.css diff --git a/website/components/search-bar/index.jsx b/website/components/search-bar/index.jsx new file mode 100644 index 000000000..5d998ca88 --- /dev/null +++ b/website/components/search-bar/index.jsx @@ -0,0 +1,28 @@ +import Search from '@hashicorp/react-search' + +export default function SearchBar() { + return ( + ( + <> + + + + + + + + )} + resolveHitLink={(hit) => ({ + href: { + pathname: `/${transformIdtoUrl(hit.objectID)}`, + }, + })} + placeholder="Search Consul documentation" + /> + ) +} + +function transformIdtoUrl(id) { + return id.replace(/\/index$/, '') +} diff --git a/website/components/search-bar/style.css b/website/components/search-bar/style.css new file mode 100644 index 000000000..3004fb498 --- /dev/null +++ b/website/components/search-bar/style.css @@ -0,0 +1,4 @@ +.g-search { + width: calc(100% - 2rem); + max-width: 600px; +} diff --git a/website/layouts/api.jsx b/website/layouts/api.jsx index 9858f78b0..706032295 100644 --- a/website/layouts/api.jsx +++ b/website/layouts/api.jsx @@ -1,18 +1,21 @@ -import DocsPage from '@hashicorp/react-docs-page' -import order from '../data/api-navigation.js' -import { frontMatter as data } from '../pages/api-docs/**/*.mdx' import Head from 'next/head' import Link from 'next/link' import { createMdxProvider } from '@hashicorp/nextjs-scripts/lib/providers/docs' +import DocsPage 
from '@hashicorp/react-docs-page' +import { SearchProvider } from '@hashicorp/react-search' +import SearchBar from '../components/search-bar' +import { frontMatter as data } from '../pages/api-docs/**/*.mdx' +import order from '../data/api-navigation.js' const MDXProvider = createMdxProvider({ product: 'consul' }) function ApiDocsLayoutWrapper(pageMeta) { function ApiDocsLayout(props) { + const { children, ...propsWithoutChildren } = props return ( + > + + + {children} + + ) } diff --git a/website/layouts/docs.jsx b/website/layouts/docs.jsx index 97c4e8eb6..08f0c5da8 100644 --- a/website/layouts/docs.jsx +++ b/website/layouts/docs.jsx @@ -1,18 +1,21 @@ -import DocsPage from '@hashicorp/react-docs-page' -import order from '../data/docs-navigation.js' -import { frontMatter as data } from '../pages/docs/**/*.mdx' import Head from 'next/head' import Link from 'next/link' import { createMdxProvider } from '@hashicorp/nextjs-scripts/lib/providers/docs' +import DocsPage from '@hashicorp/react-docs-page' +import { SearchProvider } from '@hashicorp/react-search' +import SearchBar from '../components/search-bar' +import { frontMatter as data } from '../pages/docs/**/*.mdx' +import order from '../data/docs-navigation.js' const MDXProvider = createMdxProvider({ product: 'consul' }) function DocsLayoutWrapper(pageMeta) { function DocsLayout(props) { + const { children, ...propsWithoutChildren } = props return ( + > + + + {children} + + ) } diff --git a/website/layouts/intro.jsx b/website/layouts/intro.jsx index 2813310ce..9394a2ea4 100644 --- a/website/layouts/intro.jsx +++ b/website/layouts/intro.jsx @@ -1,18 +1,21 @@ -import DocsPage from '@hashicorp/react-docs-page' -import order from '../data/intro-navigation.js' -import { frontMatter as data } from '../pages/intro/**/*.mdx' import Head from 'next/head' import Link from 'next/link' import { createMdxProvider } from '@hashicorp/nextjs-scripts/lib/providers/docs' +import DocsPage from '@hashicorp/react-docs-page' +import { 
SearchProvider } from '@hashicorp/react-search' +import SearchBar from '../components/search-bar' +import { frontMatter as data } from '../pages/intro/**/*.mdx' +import order from '../data/intro-navigation.js' const MDXProvider = createMdxProvider({ product: 'consul' }) function IntroLayoutWrapper(pageMeta) { function IntroLayout(props) { + const { children, ...propsWithoutChildren } = props return ( + > + + + {children} + + ) } diff --git a/website/package-lock.json b/website/package-lock.json index afd97fb24..926667eaa 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -3768,6 +3768,23 @@ "@hashicorp/react-image": "^2.0.3" } }, + "@hashicorp/react-search": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@hashicorp/react-search/-/react-search-2.1.0.tgz", + "integrity": "sha512-vaTht+2G9ipsVyusK3b3TtUpuy9ccsxj3NMSWXJyGsoT39K1Oovb8aLiIlbUU5Ll72KEi5yq5OS3WAJDdSqW+g==", + "requires": { + "@hashicorp/react-inline-svg": "^1.0.2", + "@hashicorp/remark-plugins": "^3.0.0", + "algoliasearch": "^4.4.0", + "dotenv": "^8.2.0", + "glob": "^7.1.6", + "gray-matter": "^4.0.2", + "react-instantsearch-dom": "^6.7.0", + "remark": "^12.0.1", + "search-insights": "^1.6.0", + "unist-util-visit": "^2.0.3" + } + }, "@hashicorp/react-section-header": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/@hashicorp/react-section-header/-/react-section-header-2.0.2.tgz", @@ -4584,6 +4601,21 @@ "@algolia/transporter": "4.4.0" } }, + "algoliasearch-helper": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.2.2.tgz", + "integrity": "sha512-/3XvE33R+gQKaiPdy3nmHYqhF8hqIu8xnlOicVxb1fD6uMFmxW8rGLzzrRfsPfxgAfm+c1NslLb3TzQVIB8aVA==", + "requires": { + "events": "^1.1.1" + }, + "dependencies": { + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + } + } + }, "ally.js": { "version": 
"1.4.1", "resolved": "https://registry.npmjs.org/ally.js/-/ally.js-1.4.1.tgz", @@ -14310,6 +14342,34 @@ } } }, + "react-fast-compare": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.0.tgz", + "integrity": "sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==" + }, + "react-instantsearch-core": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-core/-/react-instantsearch-core-6.7.0.tgz", + "integrity": "sha512-wIvSIwkWfqPbaQZcbKsfBK3Gpm1e7ahSwU8Bmx1N5RfUqA/NghqS0Ppv3sz4vCXjoEAdPV06R+Fpn9lT+cE9/Q==", + "requires": { + "@babel/runtime": "^7.1.2", + "algoliasearch-helper": "^3.1.0", + "prop-types": "^15.5.10", + "react-fast-compare": "^3.0.0" + } + }, + "react-instantsearch-dom": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/react-instantsearch-dom/-/react-instantsearch-dom-6.7.0.tgz", + "integrity": "sha512-J1C9xkHHLLa6rkKLKFDa7szA0TDo6yPFGmDzh2+JLaq4o694RIqivfUpROHus0Ki3BAQu9QmzLtodf6K1NOBWQ==", + "requires": { + "@babel/runtime": "^7.1.2", + "algoliasearch-helper": "^3.1.0", + "classnames": "^2.2.5", + "prop-types": "^15.5.10", + "react-instantsearch-core": "^6.7.0" + } + }, "react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", @@ -15320,6 +15380,11 @@ "ajv-keywords": "^3.1.0" } }, + "search-insights": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-1.6.2.tgz", + "integrity": "sha512-mpy+57HZVMZH5HsMHYMCLvkf+tUvhy+ycP2tDy1j7wmj+mQsNZ3LC61IcMYomok02NozaMR3GiGyfH6uc+ibdA==" + }, "section-matter": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", diff --git a/website/package.json b/website/package.json index 1079ffef5..40fe6af15 100644 --- a/website/package.json +++ b/website/package.json @@ -22,6 +22,7 @@ "@hashicorp/react-mega-nav": "4.0.1-2", 
"@hashicorp/react-product-downloader": "4.1.1", "@hashicorp/react-product-features-list": "1.0.3", + "@hashicorp/react-search": "^2.1.0", "@hashicorp/react-section-header": "2.0.2", "@hashicorp/react-subnav": "3.2.6", "@hashicorp/react-text-and-content": "4.1.4", @@ -31,21 +32,15 @@ "@hashicorp/react-text-split-with-logo-grid": "1.3.0", "@hashicorp/react-use-cases": "1.0.6", "@hashicorp/react-vertical-text-block-list": "2.0.3", - "algoliasearch": "4.4.0", "babel-plugin-import-glob-array": "0.2.0", - "dotenv": "8.2.0", - "gray-matter": "4.0.2", "next": "9.4.4", "nuka-carousel": "4.7.0", "react": "16.13.1", "react-device-detect": "1.13.1", - "react-dom": "16.13.1", - "remark": "12.0.1", - "unist-util-visit": "2.0.3" + "react-dom": "16.13.1" }, "devDependencies": { "dart-linkcheck": "2.0.15", - "glob": "7.1.6", "husky": "4.2.5", "prettier": "2.0.5" }, diff --git a/website/pages/style.css b/website/pages/style.css index 8d323a889..ed2f68996 100644 --- a/website/pages/style.css +++ b/website/pages/style.css @@ -10,31 +10,32 @@ --highlight-color: var(--consul); } -@import '~@hashicorp/react-button/dist/style.css'; -@import '~@hashicorp/react-section-header/dist/style.css'; -@import '~@hashicorp/react-logo-grid/dist/style.css'; -@import '~@hashicorp/react-product-features-list/dist/style.css'; -@import '~@hashicorp/react-product-downloader/dist/style.css'; -@import '~@hashicorp/react-vertical-text-block-list/dist/style.css'; -@import '~@hashicorp/react-docs-sidenav/dist/style.css'; -@import '~@hashicorp/react-content/dist/style.css'; -@import '~@hashicorp/react-subnav/dist/style.css'; -@import '~@hashicorp/react-text-and-content/dist/style.css'; -@import '~@hashicorp/react-consent-manager/dist/style.css'; -@import '~@hashicorp/react-toggle/dist/style.css'; +@import '~@hashicorp/react-alert-banner/dist/style.css'; @import '~@hashicorp/react-alert/dist/style.css'; -@import '~@hashicorp/react-text-split/dist/style.css'; -@import 
'~@hashicorp/react-text-split-with-code/dist/style.css'; -@import '~@hashicorp/react-enterprise-alert/dist/style.css'; -@import '~@hashicorp/react-mega-nav/style.css'; -@import '~@hashicorp/react-docs-page/style.css'; +@import '~@hashicorp/react-button/dist/style.css'; @import '~@hashicorp/react-call-to-action/dist/style.css'; @import '~@hashicorp/react-case-study-slider/dist/style.css'; -@import '~@hashicorp/react-tabs/dist/style.css'; @import '~@hashicorp/react-code-block/dist/style.css'; -@import '~@hashicorp/react-alert-banner/dist/style.css'; -@import '~@hashicorp/react-use-cases/dist/style.css'; +@import '~@hashicorp/react-consent-manager/dist/style.css'; +@import '~@hashicorp/react-content/dist/style.css'; +@import '~@hashicorp/react-docs-page/style.css'; +@import '~@hashicorp/react-docs-sidenav/dist/style.css'; +@import '~@hashicorp/react-enterprise-alert/dist/style.css'; @import '~@hashicorp/react-featured-slider/dist/style.css'; +@import '~@hashicorp/react-logo-grid/dist/style.css'; +@import '~@hashicorp/react-mega-nav/style.css'; +@import '~@hashicorp/react-product-downloader/dist/style.css'; +@import '~@hashicorp/react-product-features-list/dist/style.css'; +@import '~@hashicorp/react-search/dist/style.css'; +@import '~@hashicorp/react-section-header/dist/style.css'; +@import '~@hashicorp/react-subnav/dist/style.css'; +@import '~@hashicorp/react-tabs/dist/style.css'; +@import '~@hashicorp/react-text-and-content/dist/style.css'; +@import '~@hashicorp/react-text-split-with-code/dist/style.css'; +@import '~@hashicorp/react-text-split/dist/style.css'; +@import '~@hashicorp/react-toggle/dist/style.css'; +@import '~@hashicorp/react-use-cases/dist/style.css'; +@import '~@hashicorp/react-vertical-text-block-list/dist/style.css'; /* Local Components */ @import '../components/basic-hero/style.css'; @@ -43,6 +44,7 @@ @import '../components/learn-callout/style.css'; @import '../components/case-study-carousel/style.css'; @import 
'../components/cloud-offerings-list/style.css'; +@import '../components/search-bar/style.css'; /* Layouts */ @import '../layouts/use-cases/style.css'; diff --git a/website/scripts/index_search_content.js b/website/scripts/index_search_content.js index 7eeb7524f..f853b4ddf 100644 --- a/website/scripts/index_search_content.js +++ b/website/scripts/index_search_content.js @@ -1,125 +1,3 @@ -require('dotenv').config() +const { indexDocsContent } = require('@hashicorp/react-search/tools') -const algoliasearch = require('algoliasearch') -const glob = require('glob') -const matter = require('gray-matter') -const path = require('path') -const remark = require('remark') -const visit = require('unist-util-visit') - -// In addition to the content of the page, -// define additional front matter attributes that will be search-indexable -const SEARCH_DIMENSIONS = ['page_title', 'description'] - -main() - -async function main() { - const pagesFolder = path.join(__dirname, '../pages') - - // Grab all search-indexable content and format for Algolia - const searchObjects = await Promise.all( - glob.sync(path.join(pagesFolder, '**/*.mdx')).map(async (fullPath) => { - const { content, data } = matter.read(fullPath) - - const searchableDimensions = SEARCH_DIMENSIONS.reduce( - (acc, dimension) => { - return { ...acc, [dimension]: data[dimension] } - }, - {} - ) - - const headings = await collectHeadings(content) - - // Get path relative to `pages` - const __resourcePath = fullPath.replace(`${pagesFolder}/`, '') - - // Use clean URL for Algolia id - const objectID = __resourcePath.replace('.mdx', '') - - return { - ...searchableDimensions, - headings, - objectID, - } - }) - ) - - try { - await indexSearchContent(searchObjects) - } catch (e) { - console.error(e) - process.exit(1) - } -} - -async function indexSearchContent(objects) { - const { - NEXT_PUBLIC_ALGOLIA_APP_ID: appId, - NEXT_PUBLIC_ALGOLIA_INDEX: index, - ALGOLIA_API_KEY: apiKey, - } = process.env - - if (!apiKey || !appId || 
!index) { - throw new Error( - `[*** Algolia Search Indexing Error ***] Received: ALGOLIA_API_KEY=${apiKey} ALGOLIA_APP_ID=${appId} ALGOLIA_INDEX=${index} \n Please ensure all Algolia Search-related environment vars are set in CI settings.` - ) - } - - console.log(`updating ${objects.length} indices...`) - - try { - const searchClient = algoliasearch(appId, apiKey) - const searchIndex = searchClient.initIndex(index) - - const { objectIDs } = await searchIndex.partialUpdateObjects(objects, { - createIfNotExists: true, - }) - - let staleIds = [] - - await searchIndex.browseObjects({ - query: '', - batch: (batch) => { - staleIds = staleIds.concat( - batch - .filter(({ objectID }) => !objectIDs.includes(objectID)) - .map(({ objectID }) => objectID) - ) - }, - }) - - if (staleIds.length > 0) { - console.log(`deleting ${staleIds.length} stale indices:`) - console.log(staleIds) - - await searchIndex.deleteObjects(staleIds) - } - - console.log('done') - process.exit(0) - } catch (error) { - throw new Error(error) - } -} - -async function collectHeadings(mdxContent) { - const headings = [] - - const headingMapper = () => (tree) => { - visit(tree, 'heading', (node) => { - const title = node.children.reduce((m, n) => { - if (n.value) m += n.value - return m - }, '') - // Only include level 1 or level 2 headings - if (node.depth < 3) { - headings.push(title) - } - }) - } - - return remark() - .use(headingMapper) - .process(mdxContent) - .then(() => headings) -} +indexDocsContent() From 719067af0d12de5b35cbd3ea5ef8c4e4052cbf12 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Tue, 8 Sep 2020 11:11:48 -0700 Subject: [PATCH 55/73] Update useSystemRoots docs for k8s --- website/pages/docs/k8s/installation/helm.mdx | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/website/pages/docs/k8s/installation/helm.mdx b/website/pages/docs/k8s/installation/helm.mdx index 13879b8ee..e238549c5 100644 --- 
a/website/pages/docs/k8s/installation/helm.mdx +++ b/website/pages/docs/k8s/installation/helm.mdx @@ -386,8 +386,13 @@ and consider if they're appropriate for your deployment. - `tlsServerName` ((#v-externalservers-tlsservername)) (`string: null`) - The server name to use as the SNI host header when connecting with HTTPS. - - `useSystemRoots` ((#v-externalservers-usesystemroots)) (`boolean: false`) - If true, the Helm chart will ignore the CA set in `global.tls.caCert` - and will rely on the container's system CAs for TLS verification when talking to Consul servers. Otherwise, the chart will use `global.tls.caCert`. + - `useSystemRoots` ((#v-externalservers-usesystemroots)) (`boolean: false`) - If true, consul-k8s components will ignore the CA set in + [`global.tls.caCert`](#v-global-cacert) when making HTTPS calls to Consul servers and + will instead use the consul-k8s image's system CAs for TLS verification. + If false, consul-k8s components will use `global.tls.caCert` when + making HTTPS calls to Consul servers. + **NOTE:** This does not affect Consul's internal RPC communication which will + always use `global.tls.caCert`. - `k8sAuthMethodHost` ((#v-externalservers-k8sauthmethodhost)) (`string: null`) - If you are setting `global.acls.manageSystemACLs` and `connectInject.enabled` to true, set `k8sAuthMethodHost` to the address of the Kubernetes API server. 
From 0f58a22f0deab0b43b4f701144a54ed8ecce6700 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Tue, 8 Sep 2020 11:31:04 -0700 Subject: [PATCH 56/73] Move helm reference out --- website/data/docs-navigation.js | 2 +- website/pages/docs/k8s/{installation => }/helm.mdx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename website/pages/docs/k8s/{installation => }/helm.mdx (99%) diff --git a/website/data/docs-navigation.js b/website/data/docs-navigation.js index 8fd74023b..ceee6ffd1 100644 --- a/website/data/docs-navigation.js +++ b/website/data/docs-navigation.js @@ -94,7 +94,6 @@ export default [ { category: 'installation', content: [ - 'helm', { category: 'platforms', name: 'Platform Guides', @@ -151,6 +150,7 @@ export default [ 'ambassador', 'upgrade', 'uninstall', + 'helm', ], }, { diff --git a/website/pages/docs/k8s/installation/helm.mdx b/website/pages/docs/k8s/helm.mdx similarity index 99% rename from website/pages/docs/k8s/installation/helm.mdx rename to website/pages/docs/k8s/helm.mdx index e238549c5..5a32c0e0b 100644 --- a/website/pages/docs/k8s/installation/helm.mdx +++ b/website/pages/docs/k8s/helm.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Install with Helm Chart -sidebar_title: Install with Helm Chart +page_title: Helm Chart Reference +sidebar_title: Helm Chart Reference description: Reference for the Consul Helm chart. --- From 2f9c3b870beac60f31a47ce67f82ba254cb71ae8 Mon Sep 17 00:00:00 2001 From: Blake Covarrubias Date: Tue, 8 Sep 2020 15:55:22 -0700 Subject: [PATCH 57/73] docs: Fix rendering of link under service config endpoint HTML and markdown cannot be present in the same line. Change markdown link to HTML anchor element. 
--- website/pages/api-docs/agent/service.mdx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/pages/api-docs/agent/service.mdx b/website/pages/api-docs/agent/service.mdx index 1b99e751b..20e166f1c 100644 --- a/website/pages/api-docs/agent/service.mdx +++ b/website/pages/api-docs/agent/service.mdx @@ -145,8 +145,7 @@ The table below shows this endpoint's support for | ----------------- | ----------------- | ------------- | -------------- | | `YES`1 | `none` | `none` | `service:read` | -1 Supports [hash-based blocking](/api/features/blocking#hash-based-blocking-queries) -only. +1 Supports hash-based blocking only. ### Parameters From 4086385e59c8c6e935a583706a5d64dc5a333072 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 9 Sep 2020 09:12:09 +0100 Subject: [PATCH 58/73] ui: [Bugfix] KV creation from within a 'folder' (#8613) * ui: Prefill a newly created KV when we are on a create route * ui: Add some KV creation tests --- ui-v2/app/components/consul-kv-form/index.hbs | 2 +- ui-v2/app/routes/dc/kv/edit.js | 12 +++- ui-v2/tests/acceptance/dc/kvs/create.feature | 64 +++++++++++++++---- 3 files changed, 62 insertions(+), 16 deletions(-) diff --git a/ui-v2/app/components/consul-kv-form/index.hbs b/ui-v2/app/components/consul-kv-form/index.hbs index f5fff5391..a0a067081 100644 --- a/ui-v2/app/components/consul-kv-form/index.hbs +++ b/ui-v2/app/components/consul-kv-form/index.hbs @@ -31,7 +31,7 @@

    - -

    Learn more about Consul cloud offerings