From 0bf124502eec4be305f0a9441157a9ea153c7c2f Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sat, 7 Aug 2021 13:21:12 +0100 Subject: [PATCH 01/60] grpc Server: turn panic into error through middleware --- agent/grpc/handler.go | 11 ++++++++++- agent/xds/server.go | 8 ++++++++ go.mod | 3 ++- go.sum | 7 +++++++ 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/agent/grpc/handler.go b/agent/grpc/handler.go index 53705b4dd..e21a4b768 100644 --- a/agent/grpc/handler.go +++ b/agent/grpc/handler.go @@ -10,6 +10,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/keepalive" + + middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" ) // NewHandler returns a gRPC server that accepts connections from Handle(conn). @@ -20,8 +23,14 @@ func NewHandler(addr net.Addr, register func(server *grpc.Server)) *Handler { // We don't need to pass tls.Config to the server since it's multiplexed // behind the RPC listener, which already has TLS configured. srv := grpc.NewServer( + middleware.WithUnaryServerChain( + recovery.UnaryServerInterceptor(), + ), + middleware.WithStreamServerChain( + recovery.StreamServerInterceptor(), + (&activeStreamCounter{metrics: metrics}).Intercept, + ), grpc.StatsHandler(newStatsHandler(metrics)), - grpc.StreamInterceptor((&activeStreamCounter{metrics: metrics}).Intercept), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: 15 * time.Second, }), diff --git a/agent/xds/server.go b/agent/xds/server.go index 8e6037116..19ff44aba 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -13,6 +13,8 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -547,6 +549,12 @@ func tokenFromContext(ctx context.Context) string { func (s *Server) GRPCServer(tlsConfigurator *tlsutil.Configurator) (*grpc.Server, error) { opts := []grpc.ServerOption{ grpc.MaxConcurrentStreams(2048), + middleware.WithUnaryServerChain( + recovery.UnaryServerInterceptor(), + ), + middleware.WithStreamServerChain( + recovery.StreamServerInterceptor(), + ), } if tlsConfigurator != nil { if tlsConfigurator.Cert() != nil { diff --git a/go.mod b/go.mod index eb10c2ad2..c515bbf03 100644 --- a/go.mod +++ b/go.mod @@ -29,6 +29,7 @@ require ( github.com/google/gofuzz v1.2.0 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2 github.com/hashicorp/consul/api v1.8.0 github.com/hashicorp/consul/sdk v0.7.0 github.com/hashicorp/go-bexpr v0.1.2 @@ -72,7 +73,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.1 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pierrec/lz4 v2.5.2+incompatible // indirect - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect github.com/prometheus/client_golang v1.4.0 github.com/rboyer/safeio v0.2.1 diff --git a/go.sum b/go.sum index 672adbcf5..a886c10f4 100644 --- a/go.sum +++ b/go.sum @@ -211,7 +211,11 @@ github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= 
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2 h1:1aeRCnE2CkKYqyzBu0+B2lgTcZPc3ea2lGpijeHbI1c= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -405,6 +409,7 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -420,6 +425,8 @@ github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= From 465e9fecdac33576451e1eb1ca5537f1e05112ff Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sun, 22 Aug 2021 19:06:26 +0100 Subject: [PATCH 02/60] grpc, xds: recovery middleware to return and log error in case of panic 1) xds and grpc servers: 1.1) to use recovery middleware with callback that prints stack trace to log 1.2) callback turn the panic into a core.Internal error 2) added unit test for grpc server --- agent/consul/server.go | 2 +- agent/grpc/client_test.go | 49 ++++++++++++++++++++++++--- agent/grpc/handler.go | 30 ++++++++++++++-- agent/grpc/server_test.go | 35 +++++++++++++++++-- agent/grpc/stats_test.go | 3 +- agent/rpc/subscribe/subscribe_test.go | 2 +- agent/xds/server.go | 25 ++++++++++++-- 7 files changed, 131 insertions(+), 15 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index e5e4ecb37..a7a651767 
100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -640,7 +640,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler &subscribeBackend{srv: s, connPool: deps.GRPCConnPool}, deps.Logger.Named("grpc-api.subscription"))) } - return agentgrpc.NewHandler(config.RPCAddr, register) + return agentgrpc.NewHandler(deps.Logger, config.RPCAddr, register) } func (s *Server) connectCARootsMonitor(ctx context.Context) { diff --git a/agent/grpc/client_test.go b/agent/grpc/client_test.go index 49922a309..a831bc8ba 100644 --- a/agent/grpc/client_test.go +++ b/agent/grpc/client_test.go @@ -1,6 +1,7 @@ package grpc import ( + "bytes" "context" "fmt" "net" @@ -11,6 +12,8 @@ import ( "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/hashicorp/consul/agent/grpc/internal/testservice" "github.com/hashicorp/consul/agent/grpc/resolver" @@ -54,7 +57,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { res := resolver.NewServerResolverBuilder(newConfig(t)) registerWithGRPC(t, res) - srv := newTestServer(t, "server-1", "dc1") + srv := newSimpleTestServer(t, "server-1", "dc1") tlsConf, err := tlsutil.NewConfigurator(tlsutil.Config{ VerifyIncoming: true, VerifyOutgoing: true, @@ -91,7 +94,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) - srv := newTestServer(t, name, "dc1") + srv := newSimpleTestServer(t, name, "dc1") res.AddServer(srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -128,7 +131,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) - srv := newTestServer(t, name, "dc1") + srv := newSimpleTestServer(t, name, "dc1") res.AddServer(srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -177,7 +180,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) { for _, dc := range dcs { name := "server-0-" + dc - srv := newTestServer(t, name, dc) + srv := newSimpleTestServer(t, name, dc) res.AddServer(srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -202,3 +205,41 @@ func registerWithGRPC(t *testing.T, b *resolver.ServerResolverBuilder) { resolver.Deregister(b.Authority()) }) } + +func TestRecoverMiddleware(t *testing.T) { + // Prepare a logger with output to a buffer + // so we can check what it writes. + var buf bytes.Buffer + + logger := hclog.New(&hclog.LoggerOptions{ + Output: &buf, + }) + + res := resolver.NewServerResolverBuilder(newConfig(t)) + registerWithGRPC(t, res) + + srv := newPanicTestServer(t, logger, "server-1", "dc1") + res.AddServer(srv.Metadata()) + t.Cleanup(srv.shutdown) + + pool := NewClientConnPool(res, nil, useTLSForDcAlwaysTrue) + + conn, err := pool.ClientConn("dc1") + require.NoError(t, err) + client := testservice.NewSimpleClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + t.Cleanup(cancel) + + resp, err := client.Something(ctx, &testservice.Req{}) + expectedErr := status.Errorf(codes.Internal, "grpc: panic serving request: panic from Something") + require.Equal(t, expectedErr, err) + require.Nil(t, resp) + + // Read the log + strLog := buf.String() + // Checking the entire stack trace is not possible, let's + // make sure that it contains a couple of expected strings. 
+ require.Contains(t, strLog, `[ERROR] panic serving grpc request: panic="panic from Something`) + require.Contains(t, strLog, `github.com/hashicorp/consul/agent/grpc.(*simplePanic).Something`) +} diff --git a/agent/grpc/handler.go b/agent/grpc/handler.go index e21a4b768..3a48679b0 100644 --- a/agent/grpc/handler.go +++ b/agent/grpc/handler.go @@ -4,30 +4,39 @@ Package grpc provides a Handler and client for agent gRPC connections. package grpc import ( + "context" "fmt" "net" "time" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "github.com/hashicorp/go-hclog" ) // NewHandler returns a gRPC server that accepts connections from Handle(conn). // The register function will be called with the grpc.Server to register // gRPC services with the server. -func NewHandler(addr net.Addr, register func(server *grpc.Server)) *Handler { +func NewHandler(logger Logger, addr net.Addr, register func(server *grpc.Server)) *Handler { + recoveryOpts := []recovery.Option{ + recovery.WithRecoveryHandlerContext(newPanicHandler(logger)), + } metrics := defaultMetrics() // We don't need to pass tls.Config to the server since it's multiplexed // behind the RPC listener, which already has TLS configured. srv := grpc.NewServer( middleware.WithUnaryServerChain( - recovery.UnaryServerInterceptor(), + // Add middlware interceptors to recover in case of panics. + recovery.UnaryServerInterceptor(recoveryOpts...), ), middleware.WithStreamServerChain( - recovery.StreamServerInterceptor(), + // Add middlware interceptors to recover in case of panics. + recovery.StreamServerInterceptor(recoveryOpts...), (&activeStreamCounter{metrics: metrics}).Intercept, ), grpc.StatsHandler(newStatsHandler(metrics)), @@ -41,6 +50,21 @@ func NewHandler(addr net.Addr, register func(server *grpc.Server)) *Handler { return &Handler{srv: srv, listener: lis} } +// newPanicHandler returns a recovery.RecoveryHandlerFuncContext closure function +// to handle panic in GRPC server's handlers. +func newPanicHandler(logger Logger) recovery.RecoveryHandlerFuncContext { + return func(ctx context.Context, p interface{}) (err error) { + // Log the panic and the stack trace of the Goroutine that caused the panic. + stacktrace := hclog.Stacktrace() + logger.Error("panic serving grpc request", + "panic", p, + "stack", stacktrace, + ) + + return status.Errorf(codes.Internal, "grpc: panic serving request: %v", p) + } +} + // Handler implements a handler for the rpc server listener, and the // agent.Component interface for managing the lifecycle of the grpc.Server. 
type Handler struct { diff --git a/agent/grpc/server_test.go b/agent/grpc/server_test.go index 442b617d5..d6efa826d 100644 --- a/agent/grpc/server_test.go +++ b/agent/grpc/server_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/go-hclog" ) type testServer struct { @@ -37,11 +38,22 @@ func (s testServer) Metadata() *metadata.Server { } } -func newTestServer(t *testing.T, name string, dc string) testServer { - addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} - handler := NewHandler(addr, func(server *grpc.Server) { +func newSimpleTestServer(t *testing.T, name, dc string) testServer { + return newTestServer(t, hclog.Default(), name, dc, func(server *grpc.Server) { testservice.RegisterSimpleServer(server, &simple{name: name, dc: dc}) }) +} + +// newPanicTestServer sets up a simple server with handlers that panic. +func newPanicTestServer(t *testing.T, logger hclog.Logger, name, dc string) testServer { + return newTestServer(t, logger, name, dc, func(server *grpc.Server) { + testservice.RegisterSimpleServer(server, &simplePanic{name: name, dc: dc}) + }) +} + +func newTestServer(t *testing.T, logger hclog.Logger, name, dc string, register func(server *grpc.Server)) testServer { + addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} + handler := NewHandler(logger, addr, register) lis, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -101,6 +113,23 @@ func (s *simple) Something(_ context.Context, _ *testservice.Req) (*testservice. return &testservice.Resp{ServerName: s.name, Datacenter: s.dc}, nil } +type simplePanic struct { + name, dc string +} + +func (s *simplePanic) Flow(_ *testservice.Req, flow testservice.Simple_FlowServer) error { + for flow.Context().Err() == nil { + time.Sleep(time.Millisecond) + panic("panic from Flow") + } + return nil +} + +func (s *simplePanic) Something(_ context.Context, _ *testservice.Req) (*testservice.Resp, error) { + time.Sleep(time.Millisecond) + panic("panic from Something") +} + // fakeRPCListener mimics agent/consul.Server.listen to handle the RPCType byte. // In the future we should be able to refactor Server and extract this RPC // handling logic so that we don't need to use a fake. 
diff --git a/agent/grpc/stats_test.go b/agent/grpc/stats_test.go index 475bbf6df..079de3408 100644 --- a/agent/grpc/stats_test.go +++ b/agent/grpc/stats_test.go @@ -15,6 +15,7 @@ import ( "google.golang.org/grpc" "github.com/hashicorp/consul/agent/grpc/internal/testservice" + "github.com/hashicorp/go-hclog" ) func noopRegister(*grpc.Server) {} @@ -23,7 +24,7 @@ func TestHandler_EmitsStats(t *testing.T) { sink, reset := patchGlobalMetrics(t) addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} - handler := NewHandler(addr, noopRegister) + handler := NewHandler(hclog.Default(), addr, noopRegister) reset() testservice.RegisterSimpleServer(handler.srv, &simple{}) diff --git a/agent/rpc/subscribe/subscribe_test.go b/agent/rpc/subscribe/subscribe_test.go index d2c13716d..7ec636ec8 100644 --- a/agent/rpc/subscribe/subscribe_test.go +++ b/agent/rpc/subscribe/subscribe_test.go @@ -317,7 +317,7 @@ var _ Backend = (*testBackend)(nil) func runTestServer(t *testing.T, server *Server) net.Addr { addr := &net.IPAddr{IP: net.ParseIP("127.0.0.1")} var grpcServer *gogrpc.Server - handler := grpc.NewHandler(addr, func(srv *gogrpc.Server) { + handler := grpc.NewHandler(hclog.New(nil), addr, func(srv *gogrpc.Server) { grpcServer = srv pbsubscribe.RegisterStateChangeSubscriptionServer(srv, server) }) diff --git a/agent/xds/server.go b/agent/xds/server.go index 19ff44aba..011cdb653 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -545,15 +545,36 @@ func tokenFromContext(ctx context.Context) string { return "" } +// newPanicHandler returns a recovery.RecoveryHandlerFuncContext closure function +// to handle panic in GRPC server's handlers. +func newPanicHandler(logger hclog.Logger) recovery.RecoveryHandlerFuncContext { + return func(ctx context.Context, p interface{}) (err error) { + // Log the panic and the stack trace of the Goroutine that caused the panic. + stacktrace := hclog.Stacktrace() + logger.Error("panic serving grpc request", + "panic", p, + "stack", stacktrace, + ) + + return status.Errorf(codes.Internal, "grpc: panic serving request: %v", p) + } +} + // GRPCServer returns a server instance that can handle xDS requests. func (s *Server) GRPCServer(tlsConfigurator *tlsutil.Configurator) (*grpc.Server, error) { + recoveryOpts := []recovery.Option{ + recovery.WithRecoveryHandlerContext(newPanicHandler(s.Logger)), + } + opts := []grpc.ServerOption{ grpc.MaxConcurrentStreams(2048), middleware.WithUnaryServerChain( - recovery.UnaryServerInterceptor(), + // Add middlware interceptors to recover in case of panics. + recovery.UnaryServerInterceptor(recoveryOpts...), ), middleware.WithStreamServerChain( - recovery.StreamServerInterceptor(), + // Add middlware interceptors to recover in case of panics. + recovery.StreamServerInterceptor(recoveryOpts...), ), } if tlsConfigurator != nil { From 0317a088e26432ecc4ccd938e17df99ed0bb6fde Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sun, 22 Aug 2021 19:21:42 +0100 Subject: [PATCH 03/60] Added changelog for grpc and xds servers panic recovery. --- .changelog/10895.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/10895.txt diff --git a/.changelog/10895.txt b/.changelog/10895.txt new file mode 100644 index 000000000..b490800d0 --- /dev/null +++ b/.changelog/10895.txt @@ -0,0 +1,3 @@ +```release-note:improvement +grpc, xds: improved reliability of grpc and xds servers by adding recovery-middleware to return and log error in case of panic. 
+``` From 387f6f717be8c6012b2a7de821036f22a9da8134 Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sun, 22 Aug 2021 19:35:08 +0100 Subject: [PATCH 04/60] Fix merge conflicts --- agent/grpc/client_test.go | 2 +- agent/xds/server.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/agent/grpc/client_test.go b/agent/grpc/client_test.go index 1bec1f580..284daae9e 100644 --- a/agent/grpc/client_test.go +++ b/agent/grpc/client_test.go @@ -126,7 +126,7 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { var servers []testServer for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) - srv := newTestServer(t, name, "dc1") + srv := newSimpleTestServer(t, name, "dc1") res.AddServer(srv.Metadata()) servers = append(servers, srv) t.Cleanup(srv.shutdown) diff --git a/agent/xds/server.go b/agent/xds/server.go index c3ddbda42..c0be9c560 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -563,7 +563,7 @@ func newPanicHandler(logger hclog.Logger) recovery.RecoveryHandlerFuncContext { // NewGRPCServer creates a grpc.Server, registers the Server, and then returns // the grpc.Server. func NewGRPCServer(s *Server, tlsConfigurator *tlsutil.Configurator) *grpc.Server { - recoveryOpts := []recovery.Option{ + recoveryOpts := []recovery.Option{ recovery.WithRecoveryHandlerContext(newPanicHandler(s.Logger)), } From c4a63d4665cf179ebce1251bee8aaf3e59103d58 Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sun, 22 Aug 2021 19:50:10 +0100 Subject: [PATCH 05/60] Fix go.sum with go mod tidy --- go.sum | 1 - 1 file changed, 1 deletion(-) diff --git a/go.sum b/go.sum index c19511157..bbae36703 100644 --- a/go.sum +++ b/go.sum @@ -214,7 +214,6 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2 h1:1aeRCnE2CkKYqyzBu0+B2lgTcZPc3ea2lGpijeHbI1c= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= From a5a4eb9caecc00ad28b66f28f839bae1682ef51a Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sat, 16 Oct 2021 18:02:03 +0100 Subject: [PATCH 06/60] Separete test file and no stack trace in ret error --- agent/grpc/client_test.go | 46 ----------------------------- agent/grpc/handler.go | 39 +++++++++++++++---------- agent/grpc/handler_test.go | 59 ++++++++++++++++++++++++++++++++++++++ agent/xds/server.go | 24 +++------------- go.mod | 2 +- go.sum | 4 --- 6 files changed, 88 insertions(+), 86 deletions(-) create mode 100644 agent/grpc/handler_test.go diff --git a/agent/grpc/client_test.go b/agent/grpc/client_test.go index 62cfa8d16..07715924d 100644 --- a/agent/grpc/client_test.go +++ b/agent/grpc/client_test.go @@ -1,7 +1,6 @@ package grpc import ( - "bytes" "context" "fmt" "net" @@ -15,8 +14,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" 
"github.com/hashicorp/consul/agent/grpc/internal/testservice" "github.com/hashicorp/consul/agent/grpc/resolver" @@ -444,46 +441,3 @@ func registerWithGRPC(t *testing.T, b *resolver.ServerResolverBuilder) { resolver.Deregister(b.Authority()) }) } - -func TestRecoverMiddleware(t *testing.T) { - // Prepare a logger with output to a buffer - // so we can check what it writes. - var buf bytes.Buffer - - logger := hclog.New(&hclog.LoggerOptions{ - Output: &buf, - }) - - res := resolver.NewServerResolverBuilder(newConfig(t)) - registerWithGRPC(t, res) - - srv := newPanicTestServer(t, logger, "server-1", "dc1", nil) - res.AddServer(srv.Metadata()) - t.Cleanup(srv.shutdown) - - pool := NewClientConnPool(ClientConnPoolConfig{ - Servers: res, - UseTLSForDC: useTLSForDcAlwaysTrue, - DialingFromServer: true, - DialingFromDatacenter: "dc1", - }) - - conn, err := pool.ClientConn("dc1") - require.NoError(t, err) - client := testservice.NewSimpleClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - t.Cleanup(cancel) - - resp, err := client.Something(ctx, &testservice.Req{}) - expectedErr := status.Errorf(codes.Internal, "grpc: panic serving request: panic from Something") - require.Equal(t, expectedErr, err) - require.Nil(t, resp) - - // Read the log - strLog := buf.String() - // Checking the entire stack trace is not possible, let's - // make sure that it contains a couple of expected strings. - require.Contains(t, strLog, `[ERROR] panic serving grpc request: panic="panic from Something`) - require.Contains(t, strLog, `github.com/hashicorp/consul/agent/grpc.(*simplePanic).Something`) -} diff --git a/agent/grpc/handler.go b/agent/grpc/handler.go index 3a48679b0..e7614c456 100644 --- a/agent/grpc/handler.go +++ b/agent/grpc/handler.go @@ -4,7 +4,6 @@ Package grpc provides a Handler and client for agent gRPC connections. package grpc import ( - "context" "fmt" "net" "time" @@ -14,8 +13,8 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" - middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + middleware "github.com/grpc-ecosystem/go-grpc-middleware" + recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "github.com/hashicorp/go-hclog" ) @@ -23,13 +22,12 @@ import ( // The register function will be called with the grpc.Server to register // gRPC services with the server. func NewHandler(logger Logger, addr net.Addr, register func(server *grpc.Server)) *Handler { - recoveryOpts := []recovery.Option{ - recovery.WithRecoveryHandlerContext(newPanicHandler(logger)), - } metrics := defaultMetrics() - // We don't need to pass tls.Config to the server since it's multiplexed - // behind the RPC listener, which already has TLS configured. - srv := grpc.NewServer( + + recoveryOpts := PanicHandlerMiddlewareOpts(logger) + + opts := []grpc.ServerOption{ + grpc.StatsHandler(newStatsHandler(metrics)), middleware.WithUnaryServerChain( // Add middlware interceptors to recover in case of panics. 
recovery.UnaryServerInterceptor(recoveryOpts...), @@ -39,21 +37,32 @@ func NewHandler(logger Logger, addr net.Addr, register func(server *grpc.Server) recovery.StreamServerInterceptor(recoveryOpts...), (&activeStreamCounter{metrics: metrics}).Intercept, ), - grpc.StatsHandler(newStatsHandler(metrics)), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: 15 * time.Second, }), - ) + } + + // We don't need to pass tls.Config to the server since it's multiplexed + // behind the RPC listener, which already has TLS configured. + srv := grpc.NewServer(opts...) register(srv) lis := &chanListener{addr: addr, conns: make(chan net.Conn), done: make(chan struct{})} return &Handler{srv: srv, listener: lis} } -// newPanicHandler returns a recovery.RecoveryHandlerFuncContext closure function +// PanicHandlerMiddlewareOpts returns the []recovery.Option containing +// recovery handler function. +func PanicHandlerMiddlewareOpts(logger Logger) []recovery.Option { + return []recovery.Option{ + recovery.WithRecoveryHandler(NewPanicHandler(logger)), + } +} + +// NewPanicHandler returns a recovery.RecoveryHandlerFunc closure function // to handle panic in GRPC server's handlers. -func newPanicHandler(logger Logger) recovery.RecoveryHandlerFuncContext { - return func(ctx context.Context, p interface{}) (err error) { +func NewPanicHandler(logger Logger) recovery.RecoveryHandlerFunc { + return func(p interface{}) (err error) { // Log the panic and the stack trace of the Goroutine that caused the panic. stacktrace := hclog.Stacktrace() logger.Error("panic serving grpc request", @@ -61,7 +70,7 @@ func newPanicHandler(logger Logger) recovery.RecoveryHandlerFuncContext { "stack", stacktrace, ) - return status.Errorf(codes.Internal, "grpc: panic serving request: %v", p) + return status.Errorf(codes.Internal, "grpc: panic serving request") } } diff --git a/agent/grpc/handler_test.go b/agent/grpc/handler_test.go new file mode 100644 index 000000000..908bed0b1 --- /dev/null +++ b/agent/grpc/handler_test.go @@ -0,0 +1,59 @@ +package grpc + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/agent/grpc/internal/testservice" + "github.com/hashicorp/consul/agent/grpc/resolver" +) + +func TestHandler_PanicRecoveryInterceptor(t *testing.T) { + // Prepare a logger with output to a buffer + // so we can check what it writes. 
+ var buf bytes.Buffer + + logger := hclog.New(&hclog.LoggerOptions{ + Output: &buf, + }) + + res := resolver.NewServerResolverBuilder(newConfig(t)) + registerWithGRPC(t, res) + + srv := newPanicTestServer(t, logger, "server-1", "dc1", nil) + res.AddServer(srv.Metadata()) + t.Cleanup(srv.shutdown) + + pool := NewClientConnPool(ClientConnPoolConfig{ + Servers: res, + UseTLSForDC: useTLSForDcAlwaysTrue, + DialingFromServer: true, + DialingFromDatacenter: "dc1", + }) + + conn, err := pool.ClientConn("dc1") + require.NoError(t, err) + client := testservice.NewSimpleClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + t.Cleanup(cancel) + + resp, err := client.Something(ctx, &testservice.Req{}) + expectedErr := status.Errorf(codes.Internal, "grpc: panic serving request") + require.Equal(t, expectedErr, err) + require.Nil(t, resp) + + // Read the log + strLog := buf.String() + // Checking the entire stack trace is not possible, let's + // make sure that it contains a couple of expected strings. + require.Contains(t, strLog, `[ERROR] panic serving grpc request: panic="panic from Something`) + require.Contains(t, strLog, `github.com/hashicorp/consul/agent/grpc.(*simplePanic).Something`) +} diff --git a/agent/xds/server.go b/agent/xds/server.go index ea6cfa279..205455be4 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -10,11 +10,11 @@ import ( envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_discovery_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + middleware "github.com/grpc-ecosystem/go-grpc-middleware" + recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" - middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/status" "github.com/hashicorp/consul/acl" + agentgrpc "github.com/hashicorp/consul/agent/grpc" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/logging" @@ -550,27 +551,10 @@ func tokenFromContext(ctx context.Context) string { return "" } -// newPanicHandler returns a recovery.RecoveryHandlerFuncContext closure function -// to handle panic in GRPC server's handlers. -func newPanicHandler(logger hclog.Logger) recovery.RecoveryHandlerFuncContext { - return func(ctx context.Context, p interface{}) (err error) { - // Log the panic and the stack trace of the Goroutine that caused the panic. - stacktrace := hclog.Stacktrace() - logger.Error("panic serving grpc request", - "panic", p, - "stack", stacktrace, - ) - - return status.Errorf(codes.Internal, "grpc: panic serving request: %v", p) - } -} - // NewGRPCServer creates a grpc.Server, registers the Server, and then returns // the grpc.Server. 
func NewGRPCServer(s *Server, tlsConfigurator *tlsutil.Configurator) *grpc.Server { - recoveryOpts := []recovery.Option{ - recovery.WithRecoveryHandlerContext(newPanicHandler(s.Logger)), - } + recoveryOpts := agentgrpc.PanicHandlerMiddlewareOpts(s.Logger) opts := []grpc.ServerOption{ grpc.MaxConcurrentStreams(2048), diff --git a/go.mod b/go.mod index fea337aa6..b8c95fb1c 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/google/gofuzz v1.2.0 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2 + github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/hashicorp/consul/api v1.8.0 github.com/hashicorp/consul/sdk v0.7.0 github.com/hashicorp/go-bexpr v0.1.2 diff --git a/go.sum b/go.sum index dded0ee10..bb4ef8c89 100644 --- a/go.sum +++ b/go.sum @@ -213,8 +213,6 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2 h1:1aeRCnE2CkKYqyzBu0+B2lgTcZPc3ea2lGpijeHbI1c= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -408,7 +406,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -422,7 +419,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= From b549de831d51ff6dcd4071c20859e1580708551c Mon Sep 17 00:00:00 2001 From: Giulio Micheloni Date: Sat, 16 Oct 2021 18:05:32 +0100 Subject: [PATCH 07/60] 
Restored comment. --- agent/grpc/handler.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agent/grpc/handler.go b/agent/grpc/handler.go index e7614c456..886186dd8 100644 --- a/agent/grpc/handler.go +++ b/agent/grpc/handler.go @@ -24,6 +24,8 @@ import ( func NewHandler(logger Logger, addr net.Addr, register func(server *grpc.Server)) *Handler { metrics := defaultMetrics() + // We don't need to pass tls.Config to the server since it's multiplexed + // behind the RPC listener, which already has TLS configured. recoveryOpts := PanicHandlerMiddlewareOpts(logger) opts := []grpc.ServerOption{ From 6d51282adfb3bb0af1f07849b3f2f6c808e702f6 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 29 Nov 2021 11:21:33 -0700 Subject: [PATCH 08/60] Prevent partition-exports entry from OSS usage Validation was added on the config entry kind since that is called when validating config entries to bootstrap via agent configuration and when applying entries via the config RPC endpoint. --- agent/structs/config_entry_exports.go | 10 +++++++--- agent/structs/config_entry_oss.go | 4 ++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 044f9d62a..7b9d7cfb8 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -113,7 +113,12 @@ func (e *PartitionExportsConfigEntry) Validate() error { return fmt.Errorf("partition-exports Name must be the name of a partition, and not a wildcard") } - validationErr := validateConfigEntryMeta(e.Meta) + if err := requireEnterprise(e.GetKind()); err != nil { + return err + } + if err := validateConfigEntryMeta(e.Meta); err != nil { + return err + } for _, svc := range e.Services { if svc.Name == "" { @@ -128,8 +133,7 @@ func (e *PartitionExportsConfigEntry) Validate() error { } } } - - return validationErr + return nil } func (e *PartitionExportsConfigEntry) CanRead(authz acl.Authorizer) bool { diff --git a/agent/structs/config_entry_oss.go b/agent/structs/config_entry_oss.go index c338bdcba..f7ccac38c 100644 --- a/agent/structs/config_entry_oss.go +++ b/agent/structs/config_entry_oss.go @@ -35,3 +35,7 @@ func validateUnusedKeys(unused []string) error { func validateInnerEnterpriseMeta(_, _ *EnterpriseMeta) error { return nil } + +func requireEnterprise(kind string) error { + return fmt.Errorf("Config entry kind %q requires Consul Enterprise", kind) +} From 76146dfc5beb1e5e2499c9ff856d358d13ab9397 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 29 Nov 2021 12:14:25 -0700 Subject: [PATCH 09/60] Move ent config test to ent file --- agent/consul/config_replication_test.go | 101 ------------------------ 1 file changed, 101 deletions(-) diff --git a/agent/consul/config_replication_test.go b/agent/consul/config_replication_test.go index 5c25101e2..5231d43a4 100644 --- a/agent/consul/config_replication_test.go +++ b/agent/consul/config_replication_test.go @@ -92,107 +92,6 @@ func TestReplication_ConfigSort(t *testing.T) { } } -func TestReplication_DisallowedConfigEntries(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.PrimaryDatacenter = "dc1" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - testrpc.WaitForLeader(t, s1.RPC, "dc1") - client := rpcClient(t, s1) - defer client.Close() - - dir2, s2 := testServerWithConfig(t, func(c *Config) { - c.Datacenter = "dc2" - c.PrimaryDatacenter = "dc1" - c.ConfigReplicationRate = 100 - 
c.ConfigReplicationBurst = 100 - c.ConfigReplicationApplyLimit = 1000000 - }) - testrpc.WaitForLeader(t, s2.RPC, "dc2") - defer os.RemoveAll(dir2) - defer s2.Shutdown() - - // Try to join. - joinWAN(t, s2, s1) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s1.RPC, "dc2") - - args := []structs.ConfigEntryRequest{ - { - Datacenter: "dc1", - Op: structs.ConfigEntryUpsert, - Entry: &structs.ServiceConfigEntry{ - Kind: structs.ServiceDefaults, - Name: "foo", - Protocol: "http2", - }, - }, - { - Datacenter: "dc1", - Op: structs.ConfigEntryUpsert, - Entry: &structs.PartitionExportsConfigEntry{ - Name: "default", - Services: []structs.ExportedService{ - { - Name: structs.WildcardSpecifier, - Consumers: []structs.ServiceConsumer{ - { - Partition: "non-default", - }, - }, - }, - }, - }, - }, - { - Datacenter: "dc1", - Op: structs.ConfigEntryUpsert, - Entry: &structs.ProxyConfigEntry{ - Kind: structs.ProxyDefaults, - Name: "global", - Config: map[string]interface{}{ - "Protocol": "http", - }, - }, - }, - { - Datacenter: "dc1", - Op: structs.ConfigEntryUpsert, - Entry: &structs.MeshConfigEntry{ - TransparentProxy: structs.TransparentProxyMeshConfig{ - MeshDestinationsOnly: true, - }, - }, - }, - } - for _, arg := range args { - out := false - require.NoError(t, s1.RPC("ConfigEntry.Apply", &arg, &out)) - } - - retry.Run(t, func(r *retry.R) { - _, local, err := s2.fsm.State().ConfigEntries(nil, structs.ReplicationEnterpriseMeta()) - require.NoError(r, err) - require.Len(r, local, 3) - - localKinds := make([]string, 0) - for _, entry := range local { - localKinds = append(localKinds, entry.GetKind()) - } - - // Should have all inserted kinds except for partition-exports. - expectKinds := []string{ - structs.ProxyDefaults, structs.ServiceDefaults, structs.MeshConfig, - } - require.ElementsMatch(r, expectKinds, localKinds) - }) -} - func TestReplication_ConfigEntries(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") From b1c6608c88291b47f97dd79f480dec11edd5403e Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 29 Nov 2021 12:19:48 -0700 Subject: [PATCH 10/60] Rename partition CLI command --- website/content/commands/admin-partition.mdx | 56 ++++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/website/content/commands/admin-partition.mdx b/website/content/commands/admin-partition.mdx index 8ed85427e..e52e46b06 100644 --- a/website/content/commands/admin-partition.mdx +++ b/website/content/commands/admin-partition.mdx @@ -1,32 +1,32 @@ --- layout: commands -page_title: 'Commands: admin-partition' +page_title: 'Commands: partition' description: | - The admin-partition command enables you create and manage Consul Enterprise admin partitions. + The partition command enables you create and manage Consul Enterprise admin partitions. --- # Consul Admin Partition -Command: `consul admin-partition` +Command: `consul partition` -The `admin-partition` command enables you to create and manage Consul Enterprise administrative or admin partitions. Admin partitions are boundaries that allow multiple tenants to exist independently of each other on a shared set of Consul servers. This feature is currently in beta. +The `partition` command enables you to create and manage Consul Enterprise administrative or admin partitions. Admin partitions are boundaries that allow multiple tenants to exist independently of each other on a shared set of Consul servers. This feature is currently in beta. 
If ACLs are enabled then a token with operator privileges may be required in order to use this command. -You should only run the `admin-partition` command in the primary datacenter. +You should only run the `partition` command in the primary datacenter. ## Usage ```shell-session -consul admin-partition +consul partition ``` -Issue the `consul admin-partition -h` command to view the subcommands. +Issue the `consul partition -h` command to view the subcommands. ```shell-session -Usage: consul admin-partition [options] [args] +Usage: consul partition [options] [args] This command has subcommands for interacting with Consul Enterprise admin partitions. Here are some simple examples. More detailed @@ -34,41 +34,41 @@ Usage: consul admin-partition [options] [args] Create an admin partition - $ consul admin-partition create -name team1 + $ consul partition create -name team1 Create or Update an admin partition from its full definition: - $ consul admin-partition write part1 + $ consul partition write part1 Read an admin partition: - $ consul admin-partition read team1 + $ consul partition read team1 List all admin partitions: - $ consul admin-partition list + $ consul partition list Update an admin partition - $ consul admin-partition update -name team1 -description "first admin-partition" + $ consul partition update -name team1 -description "first partition" Delete an admin partition: - $ consul admin-partition delete team1 + $ consul partition delete team1 For more examples, ask for subcommand help or view the documentation. ``` ## Subcommands -You can issue the following subcommands with the `consul admin-partition` command. +You can issue the following subcommands with the `consul partition` command. ### `create` The `create` subcommand sends a request to the server to create a new admin partition. ```shell-session -consul admin-partition create +consul partition create ``` The admin partition is created according to the values specified in the options. You can specify the following options: @@ -83,7 +83,7 @@ The admin partition is created according to the values specified in the options. In the following example, a partition named `webdev` is created: ```shell-session -consul admin-partition create -name "webdev" -description "Partition for admin of webdev services" -format json -show-meta +consul partition create -name "webdev" -description "Partition for admin of webdev services" -format json -show-meta { "Name": "webdev", @@ -100,16 +100,16 @@ The `write` subcommand sends a request to the server to create a new admin parti Use the following syntax to write from file: ```shell-session -consul admin-partition write +consul partition write ``` Use the following syntax to write from `stdin`: ```shell-session -consul admin-partition write - +consul partition write - ``` -The definition file or `stdin` values can be provided in JSON or HCL format. Refer to the [Admin Partition Definition](#admin-partition-definition) section for details about the supported parameters. +The definition file or `stdin` values can be provided in JSON or HCL format. Refer to the [Admin Partition Definition](#partition-definition) section for details about the supported parameters. 
You can specify the following options: @@ -121,7 +121,7 @@ You can specify the following options: In the following example, the `webdev-bu` partition is written using `stdin` values: ```shell-session -consul admin-partition write -format json -show-meta - <<< 'name = "webdev-bu" description = "backup webdev partition"' +consul partition write -format json -show-meta - <<< 'name = "webdev-bu" description = "backup webdev partition"' { "Name": "webdev-bu", @@ -136,7 +136,7 @@ consul admin-partition write -format json -show-meta - <<< 'name = "webdev-bu" The `read` subcommand sends a request to the server to read the configuration for the specified partition and print it to the console. ```shell-session -consul admin-partition read +consul partition read ``` The admin partition is created according to the values specified in the options. You can specify the following options: @@ -149,7 +149,7 @@ The admin partition is created according to the values specified in the options. In the following example, the configuration for the `webdev` partition is read: ```shell-session -consul admin-partition read -format json -meta webdev +consul partition read -format json -meta webdev { "Name": "webdev", @@ -163,7 +163,7 @@ consul admin-partition read -format json -meta webdev The `list` subcommand prints existing admin partitions to the console. ```shell-session -consul admin-partition list +consul partition list ``` The admin partition is created according to the values specified in the options. You can specify the following options: @@ -176,7 +176,7 @@ The admin partition is created according to the values specified in the options. The following example lists the admin partitions and their meta data in JSON format: ```shell-session -consul admin-partition list -format json -show-meta +consul partition list -format json -show-meta [ { @@ -204,12 +204,12 @@ consul admin-partition list -format json -show-meta The `delete` subcommand sends a request to the server to remove the specified partition. ```shell-session -consul admin-partition delete +consul partition delete ``` In the following example, the `webdev-bu` partition is deleted: ```shell-session -consul admin-partition delete webdev +consul partition delete webdev ``` ## Admin Partition Definition @@ -234,7 +234,7 @@ Description = "Partition for dev team" ## HTTP API Options -You can include the following options to interact with the HTTP API when using the `admin-partition` command. +You can include the following options to interact with the HTTP API when using the `partition` command. 
| Option | Description | Default | Required | | --- | --- | --- | --- | From 1f49738167ecb52127e88d634fbe7ffe3e488f66 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Thu, 2 Dec 2021 16:45:45 -0500 Subject: [PATCH 11/60] Use raft-boltdb/v2 --- agent/consul/server.go | 2 +- go.mod | 4 +++- go.sum | 11 ++++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index dee51b15b..f52e43cac 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -25,7 +25,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" - raftboltdb "github.com/hashicorp/raft-boltdb" + raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/serf/serf" "golang.org/x/time/rate" "google.golang.org/grpc" diff --git a/go.mod b/go.mod index 99c07da62..19ba3005f 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,8 @@ require ( github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 github.com/hashicorp/raft v1.3.2 github.com/hashicorp/raft-autopilot v0.1.5 - github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea + github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect + github.com/hashicorp/raft-boltdb/v2 v2.2.0 github.com/hashicorp/serf v0.9.6 github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 @@ -79,6 +80,7 @@ require ( github.com/ryanuber/columnize v2.1.0+incompatible github.com/shirou/gopsutil/v3 v3.21.10 github.com/stretchr/testify v1.7.0 + go.etcd.io/bbolt v1.3.5 go.opencensus.io v0.22.0 // indirect go.uber.org/goleak v1.1.10 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a diff --git a/go.sum b/go.sum index d99fe3b6b..84979bb41 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -284,14 +285,19 @@ github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.2 h1:j2tqHqFnDdWCepLxzuo3b6WzS2krIweBrvEoqBbWMTo= github.com/hashicorp/raft v1.3.2/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= 
github.com/hashicorp/raft-autopilot v0.1.5 h1:onEfMH5uHVdXQqtas36zXUHEZxLdsJVu/nXHLcLdL1I= github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= -github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 h1:Ye8SofeDHJzu9xvvaMmpMkqHELWW7rTcXwdUR0CWW48= +github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= +github.com/hashicorp/raft-boltdb/v2 v2.2.0 h1:/CVN9LSAcH50L3yp2TsPFIpeyHn1m3VF6kiutlDE3Nw= +github.com/hashicorp/raft-boltdb/v2 v2.2.0/go.mod h1:SgPUD5TP20z/bswEr210SnkUFvQP/YjKV95aaiTbeMQ= github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 h1:OKsyxKi2sNmqm1Gv93adf2AID2FOBFdCbbZn9fGtIdg= @@ -520,6 +526,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -620,6 +628,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From 8e2c71528f43c0ceb9d901835461fe684dd21461 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 17 Nov 2021 18:15:19 -0500 Subject: [PATCH 12/60] config: add NoFreelistSync option # Conflicts: # agent/config/testdata/TestRuntimeConfig_Sanitize-enterprise.golden # agent/consul/server.go --- agent/agent.go | 1 + agent/config/builder.go | 4 ++++ agent/config/config.go | 4 ++++ agent/config/runtime.go | 2 ++ agent/config/runtime_test.go | 1 + agent/config/testdata/TestRuntimeConfig_Sanitize.golden | 3 +++ agent/config/testdata/full-config.hcl | 3 +++ agent/config/testdata/full-config.json | 3 +++ agent/consul/config.go | 6 ++++++ agent/consul/server.go | 8 +++++++- 10 files 
changed, 34 insertions(+), 1 deletion(-) diff --git a/agent/agent.go b/agent/agent.go index e048b33aa..4b4689eb0 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1263,6 +1263,7 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co } cfg.ConfigEntryBootstrap = runtimeCfg.ConfigEntryBootstrap + cfg.RaftBoltDBConfig = runtimeCfg.RaftBoltDBConfig // Duplicate our own serf config once to make sure that the duplication // function does not drift. diff --git a/agent/config/builder.go b/agent/config/builder.go index 012229ae3..55c938212 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -1094,6 +1094,10 @@ func (b *builder) build() (rt RuntimeConfig, err error) { rt.UseStreamingBackend = boolValWithDefault(c.UseStreamingBackend, true) + if c.RaftBoltDBConfig != nil { + rt.RaftBoltDBConfig = *c.RaftBoltDBConfig + } + if rt.Cache.EntryFetchMaxBurst <= 0 { return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_max_burst must be strictly positive, was: %v", rt.Cache.EntryFetchMaxBurst) } diff --git a/agent/config/config.go b/agent/config/config.go index 61161b7b5..7d8ecadbb 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -4,6 +4,8 @@ import ( "encoding/json" "fmt" + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/hcl" "github.com/mitchellh/mapstructure" @@ -256,6 +258,8 @@ type Config struct { RPC RPC `mapstructure:"rpc"` + RaftBoltDBConfig *consul.RaftBoltDBConfig `mapstructure:"raft_boltdb"` + // UseStreamingBackend instead of blocking queries for service health and // any other endpoints which support streaming. UseStreamingBackend *bool `mapstructure:"use_streaming_backend"` diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 2e7d7cf97..e2393363f 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -943,6 +943,8 @@ type RuntimeConfig struct { // hcl: raft_trailing_logs = int RaftTrailingLogs int + RaftBoltDBConfig consul.RaftBoltDBConfig + // ReconnectTimeoutLAN specifies the amount of time to wait to reconnect with // another agent before deciding it's permanently gone. This can be used to // control the time it takes to reap failed nodes from the cluster. 
diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 390305902..8abbcc403 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -6015,6 +6015,7 @@ func TestLoad_FullConfig(t *testing.T) { "args": []interface{}{"dltjDJ2a", "flEa7C2d"}, }, }, + RaftBoltDBConfig: consul.RaftBoltDBConfig{NoFreelistSync: true}, } entFullRuntimeConfig(expected) diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 2d1093d1e..84c303c76 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -252,6 +252,9 @@ "RPCMaxConnsPerClient": 0, "RPCProtocol": 0, "RPCRateLimit": 0, + "RaftBoltDBConfig": { + "NoFreelistSync": false + }, "RaftProtocol": 3, "RaftSnapshotInterval": "0s", "RaftSnapshotThreshold": 0, diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index 939745c9b..869f67252 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -328,6 +328,9 @@ raft_protocol = 3 raft_snapshot_threshold = 16384 raft_snapshot_interval = "30s" raft_trailing_logs = 83749 +raft_boltdb { + NoFreelistSync = true +} read_replica = true reconnect_timeout = "23739s" reconnect_timeout_wan = "26694s" diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 4649c86bf..017651d88 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -326,6 +326,9 @@ "raft_snapshot_threshold": 16384, "raft_snapshot_interval": "30s", "raft_trailing_logs": 83749, + "raft_boltdb": { + "NoFreelistSync": true + }, "read_replica": true, "reconnect_timeout": "23739s", "reconnect_timeout_wan": "26694s", diff --git a/agent/consul/config.go b/agent/consul/config.go index 86c87f5d7..9c343494a 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -391,6 +391,8 @@ type Config struct { RPCConfig RPCConfig + RaftBoltDBConfig RaftBoltDBConfig + // Embedded Consul Enterprise specific configuration *EnterpriseConfig } @@ -603,3 +605,7 @@ type ReloadableConfig struct { RaftSnapshotInterval time.Duration RaftTrailingLogs int } + +type RaftBoltDBConfig struct { + NoFreelistSync bool +} diff --git a/agent/consul/server.go b/agent/consul/server.go index f52e43cac..45acf9535 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -18,6 +18,7 @@ import ( "time" "github.com/hashicorp/go-version" + "go.etcd.io/bbolt" "github.com/armon/go-metrics" connlimit "github.com/hashicorp/go-connlimit" @@ -729,7 +730,12 @@ func (s *Server) setupRaft() error { } // Create the backend raft store for logs and stable storage. 
- store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db")) + store, err := raftboltdb.New(raftboltdb.Options{ + BoltOptions: &bbolt.Options{ + NoFreelistSync: s.config.RaftBoltDBConfig.NoFreelistSync, + }, + Path: filepath.Join(path, "raft.db"), + }) if err != nil { return err } From 68e629a476e1ad08e6a758f6c48c4cc9811c5b80 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Thu, 2 Dec 2021 16:52:42 -0500 Subject: [PATCH 13/60] Emit raft-boltdb metrics --- agent/consul/server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/agent/consul/server.go b/agent/consul/server.go index 45acf9535..14468439d 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -742,6 +742,9 @@ func (s *Server) setupRaft() error { s.raftStore = store stable = store + // start publishing boltdb metrics + go store.RunMetrics(&lib.StopChannelContext{StopCh: s.shutdownCh}, 0) + // Wrap the store in a LogCache to improve performance. cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) if err != nil { From 607b0e95275bff3bbe312c0b1a25e0cfb1937042 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Thu, 2 Dec 2021 16:59:13 -0500 Subject: [PATCH 14/60] Add the changelog entry for bbolt modifications --- .changelog/11720.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .changelog/11720.txt diff --git a/.changelog/11720.txt b/.changelog/11720.txt new file mode 100644 index 000000000..9c141b3c7 --- /dev/null +++ b/.changelog/11720.txt @@ -0,0 +1,11 @@ +```release-note:improvement +raft: Use bbolt instead of the legacy boltdb implementation +``` + +```release-note:improvement +raft: Emit boltdb related performance metrics +``` + +```release-note:improvement +raft: Added a configuration to disable boltdb freelist syncing +``` From e38ccf0a22009650ad3f51cce94fc9b1b42a6e3f Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Fri, 3 Dec 2021 17:23:55 -0500 Subject: [PATCH 15/60] port oss changes (#11736) --- agent/consul/session_endpoint.go | 4 +-- agent/consul/session_ttl.go | 20 ++----------- agent/consul/session_ttl_test.go | 50 ++----------------------------- agent/consul/state/session_oss.go | 4 +++ 4 files changed, 12 insertions(+), 66 deletions(-) diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go index e15b05227..ae39a6fc5 100644 --- a/agent/consul/session_endpoint.go +++ b/agent/consul/session_endpoint.go @@ -151,7 +151,7 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error { if args.Op == structs.SessionCreate && args.Session.TTL != "" { // If we created a session with a TTL, reset the expiration timer - s.srv.resetSessionTimer(args.Session.ID, &args.Session) + s.srv.resetSessionTimer(&args.Session) } else if args.Op == structs.SessionDestroy { // If we destroyed a session, it might potentially have a TTL, // and we need to clear the timer @@ -308,7 +308,7 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest, // Reset the session TTL timer. 
reply.Sessions = structs.Sessions{session} - if err := s.srv.resetSessionTimer(args.SessionID, session); err != nil { + if err := s.srv.resetSessionTimer(session); err != nil { s.logger.Error("Session renew failed", "error", err) return err } diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go index 426179d96..0bb1cb3f1 100644 --- a/agent/consul/session_ttl.go +++ b/agent/consul/session_ttl.go @@ -47,13 +47,12 @@ func (s *Server) initializeSessionTimers() error { // Scan all sessions and reset their timer state := s.fsm.State() - // TODO(partitions): track all session timers in all partitions - _, sessions, err := state.SessionList(nil, structs.WildcardEnterpriseMetaInDefaultPartition()) + _, sessions, err := state.SessionListAll(nil) if err != nil { return err } for _, session := range sessions { - if err := s.resetSessionTimer(session.ID, session); err != nil { + if err := s.resetSessionTimer(session); err != nil { return err } } @@ -63,20 +62,7 @@ func (s *Server) initializeSessionTimers() error { // resetSessionTimer is used to renew the TTL of a session. // This can be used for new sessions and existing ones. A session // will be faulted in if not given. -func (s *Server) resetSessionTimer(id string, session *structs.Session) error { - // Fault the session in if not given - if session == nil { - state := s.fsm.State() - _, s, err := state.SessionGet(nil, id, nil) - if err != nil { - return err - } - if s == nil { - return fmt.Errorf("Session '%s' not found", id) - } - session = s - } - +func (s *Server) resetSessionTimer(session *structs.Session) error { // Bail if the session has no TTL, fast-path some common inputs switch session.TTL { case "", "0", "0s", "0m", "0h": diff --git a/agent/consul/session_ttl_test.go b/agent/consul/session_ttl_test.go index 160a5b69e..5fc4b09f3 100644 --- a/agent/consul/session_ttl_test.go +++ b/agent/consul/session_ttl_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/net-rpc-msgpackrpc" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" ) func generateUUID() (ret string) { @@ -59,50 +59,6 @@ func TestInitializeSessionTimers(t *testing.T) { } } -func TestResetSessionTimer_Fault(t *testing.T) { - if testing.Short() { - t.Skip("too slow for testing.Short") - } - - t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Should not exist - err := s1.resetSessionTimer(generateUUID(), nil) - if err == nil || !strings.Contains(err.Error(), "not found") { - t.Fatalf("err: %v", err) - } - - // Create a session - state := s1.fsm.State() - if err := state.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil { - t.Fatalf("err: %s", err) - } - session := &structs.Session{ - ID: generateUUID(), - Node: "foo", - TTL: "10s", - } - if err := state.SessionCreate(100, session); err != nil { - t.Fatalf("err: %v", err) - } - - // Reset the session timer - err = s1.resetSessionTimer(session.ID, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Check that we have a timer - if s1.sessionTimers.Get(session.ID) == nil { - t.Fatalf("missing session timer") - } -} - func TestResetSessionTimer_NoTTL(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -130,7 +86,7 @@ func TestResetSessionTimer_NoTTL(t *testing.T) { } // Reset the session timer - err := s1.resetSessionTimer(session.ID, session) 
+ err := s1.resetSessionTimer(session) if err != nil { t.Fatalf("err: %v", err) } @@ -155,7 +111,7 @@ func TestResetSessionTimer_InvalidTTL(t *testing.T) { } // Reset the session timer - err := s1.resetSessionTimer(session.ID, session) + err := s1.resetSessionTimer(session) if err == nil || !strings.Contains(err.Error(), "Invalid Session TTL") { t.Fatalf("err: %v", err) } diff --git a/agent/consul/state/session_oss.go b/agent/consul/state/session_oss.go index a706f2c14..d313fb5f9 100644 --- a/agent/consul/state/session_oss.go +++ b/agent/consul/state/session_oss.go @@ -187,3 +187,7 @@ func (s *Store) SessionList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) func maxIndexTxnSessions(tx *memdb.Txn, _ *structs.EnterpriseMeta) uint64 { return maxIndexTxn(tx, tableSessions) } + +func (s *Store) SessionListAll(ws memdb.WatchSet) (uint64, structs.Sessions, error) { + return s.SessionList(ws, nil) +} From 43e28a3af603ebc96a3c8b413974e2f97d569aaf Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Fri, 3 Dec 2021 23:04:09 +0000 Subject: [PATCH 16/60] query: support `ResultsFilteredByACLs` in query list endpoint (#11620) --- agent/consul/acl.go | 23 +- agent/consul/acl_test.go | 243 ++++++++++++++----- agent/consul/prepared_query_endpoint.go | 4 +- agent/consul/prepared_query_endpoint_test.go | 40 +++ 4 files changed, 245 insertions(+), 65 deletions(-) diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 8c76139e5..32dd23b10 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1592,8 +1592,10 @@ func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) { // filterPreparedQueries is used to filter prepared queries based on ACL rules. // We prune entries the user doesn't have access to, and we redact any tokens -// if the user doesn't have a management token. -func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { +// if the user doesn't have a management token. Returns true if any (named) +// queries were removed - un-named queries are meant to be ephemeral and can +// only be enumerated by a management token +func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) bool { var authzContext acl.AuthorizerContext structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext) // Management tokens can see everything with no filtering. @@ -1601,17 +1603,22 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { // the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges // so asking for ACLWrite should be unnecessary. if f.authorizer.ACLWrite(&authzContext) == acl.Allow { - return + return false } // Otherwise, we need to see what the token has access to. + var namedQueriesRemoved bool ret := make(structs.PreparedQueries, 0, len(*queries)) for _, query := range *queries { // If no prefix ACL applies to this query then filter it, since // we know at this point the user doesn't have a management // token, otherwise see what the policy says. 
- prefix, ok := query.GetACLPrefix() - if !ok || f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow { + prefix, hasName := query.GetACLPrefix() + switch { + case hasName && f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow: + namedQueriesRemoved = true + fallthrough + case !hasName: f.logger.Debug("dropping prepared query from result due to ACLs", "query", query.ID) continue } @@ -1623,6 +1630,7 @@ func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) { ret = append(ret, final) } *queries = ret + return namedQueriesRemoved } func (f *aclFilter) filterToken(token **structs.ACLToken) { @@ -1847,6 +1855,9 @@ func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, sub case *structs.IndexedCheckServiceNodes: v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes) + case *structs.PreparedQueryExecuteResponse: + v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes) + case *structs.IndexedServiceTopology: filtered := filt.filterServiceTopology(v.ServiceTopology) if filtered { @@ -1891,7 +1902,7 @@ func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, sub v.QueryMeta.ResultsFilteredByACLs = filt.filterSessions(&v.Sessions) case *structs.IndexedPreparedQueries: - filt.filterPreparedQueries(&v.Queries) + v.QueryMeta.ResultsFilteredByACLs = filt.filterPreparedQueries(&v.Queries) case **structs.PreparedQuery: filt.redactPreparedQueryTokens(v) diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 8b22ebb14..c789819c4 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -2752,6 +2752,108 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { }) } +func TestACL_filterPreparedQueryExecuteResponse(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.PreparedQueryExecuteResponse { + return &structs.PreparedQueryExecuteResponse{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Checks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, + }, + }, + }, + } + } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + 
policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Nodes) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + func TestACL_filterServiceTopology(t *testing.T) { t.Parallel() // Create some nodes. @@ -3353,70 +3455,97 @@ func TestFilterACL_redactTokenSecrets(t *testing.T) { func TestACL_filterPreparedQueries(t *testing.T) { t.Parallel() - queries := structs.PreparedQueries{ - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", - Name: "query-with-no-token", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", - Name: "query-with-a-token", - Token: "root", - }, + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedPreparedQueries { + return &structs.IndexedPreparedQueries{ + Queries: structs.PreparedQueries{ + {ID: "f004177f-2c28-83b7-4229-eacc25fe55d1"}, + { + ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", + Name: "query-with-no-token", + }, + { + ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", + Name: "query-with-a-token", + Token: "root", + }, + }, + } } - expected := structs.PreparedQueries{ - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", - Name: "query-with-no-token", - }, - &structs.PreparedQuery{ - ID: "f004177f-2c28-83b7-4229-eacc25fe55d3", - Name: "query-with-a-token", - Token: "root", - }, - } + t.Run("management token", func(t *testing.T) { + require := require.New(t) - // Try permissive filtering with a management token. This will allow the - // embedded token to be seen. - filt := newACLFilter(acl.ManageAll(), nil) - filt.filterPreparedQueries(&queries) - if !reflect.DeepEqual(queries, expected) { - t.Fatalf("bad: %#v", queries) - } + list := makeList() + filterACLWithAuthorizer(logger, acl.ManageAll(), list) - // Hang on to the entry with a token, which needs to survive the next - // operation. - original := queries[2] + // Check we get the un-named query. + require.Len(list.Queries, 3) - // Now try permissive filtering with a client token, which should cause - // the embedded token to get redacted, and the query with no name to get - // filtered out. - filt = newACLFilter(acl.AllowAll(), nil) - filt.filterPreparedQueries(&queries) - expected[2].Token = redactedToken - expected = append(structs.PreparedQueries{}, expected[1], expected[2]) - if !reflect.DeepEqual(queries, expected) { - t.Fatalf("bad: %#v", queries) - } + // Check we get the un-redacted token. + require.Equal("root", list.Queries[2].Token) - // Make sure that the original object didn't lose its token. - if original.Token != "root" { - t.Fatalf("bad token: %s", original.Token) - } + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Now try restrictive filtering. 
- filt = newACLFilter(acl.DenyAll(), nil) - filt.filterPreparedQueries(&queries) - if len(queries) != 0 { - t.Fatalf("bad: %#v", queries) - } + t.Run("permissive filtering", func(t *testing.T) { + require := require.New(t) + + list := makeList() + queryWithToken := list.Queries[2] + + filterACLWithAuthorizer(logger, acl.AllowAll(), list) + + // Check the un-named query is filtered out. + require.Len(list.Queries, 2) + + // Check the token is redacted. + require.Equal(redactedToken, list.Queries[1].Token) + + // Check the original object is unmodified. + require.Equal("root", queryWithToken.Token) + + // ResultsFilteredByACLs should not include un-named queries, which are only + // readable by a management token. + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("limited access", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + query "query-with-a-token" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + // Check we only get the query we have access to. + require.Len(list.Queries, 1) + + // Check the token is redacted. + require.Equal(redactedToken, list.Queries[0].Token) + + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("restrictive filtering", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Queries) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterServiceList(t *testing.T) { diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 6c9b90c4f..7c6620239 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -373,7 +373,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, if query.Token != "" { token = query.Token } - if err := p.srv.filterACL(token, &reply.Nodes); err != nil { + if err := p.srv.filterACL(token, reply); err != nil { return err } @@ -500,7 +500,7 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe if args.Query.Token != "" { token = args.Query.Token } - if err := p.srv.filterACL(token, &reply.Nodes); err != nil { + if err := p.srv.filterACL(token, reply); err != nil { return err } diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index e454bd756..05485dad7 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -1167,6 +1167,31 @@ func TestPreparedQuery_List(t *testing.T) { } } + // Same for a token without access to the query. 
+ { + token := createTokenWithPolicyName(t, codec, "deny-queries", ` + query_prefix "" { + policy = "deny" + } + `, "root") + + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedPreparedQueries + if err := msgpackrpc.CallWithCodec(codec, "PreparedQuery.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Queries) != 0 { + t.Fatalf("bad: %v", resp) + } + if !resp.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + } + // But a management token should work, and be able to see the captured // token. query.Query.Token = "le-token" @@ -2124,6 +2149,7 @@ func TestPreparedQuery_Execute(t *testing.T) { require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) expectNodes(t, &query, &reply, 0) + require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") }) t.Run("normal operation again with exec token", func(t *testing.T) { @@ -2246,6 +2272,20 @@ func TestPreparedQuery_Execute(t *testing.T) { expectFailoverNodes(t, &query, &reply, 0) }) + t.Run("nodes in response from dc2 are filtered by ACL token", func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execNoNodesToken}, + } + + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + + expectFailoverNodes(t, &query, &reply, 0) + require.True(t, reply.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + // Bake the exec token into the query. query.Query.Token = execToken require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) From 2f4b8d7a7d8378d9ea1389c260c2be75835edd5c Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Fri, 3 Dec 2021 23:04:24 +0000 Subject: [PATCH 17/60] internal: support `ResultsFilteredByACLs` flag/header (#11643) --- agent/consul/acl.go | 24 +- agent/consul/acl_test.go | 447 ++++++++++++++++++++----- agent/consul/internal_endpoint.go | 17 +- agent/consul/internal_endpoint_test.go | 224 ++++++++++++- 4 files changed, 605 insertions(+), 107 deletions(-) diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 32dd23b10..e5ae2cdaf 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1465,11 +1465,13 @@ func (f *aclFilter) filterIntentions(ixns *structs.Intentions) bool { } // filterNodeDump is used to filter through all parts of a node dump and -// remove elements the provided ACL token cannot access. -func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { +// remove elements the provided ACL token cannot access. Returns true if +// any elements were removed. +func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) bool { nd := *dump var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(nd); i++ { info := nd[i] @@ -1477,6 +1479,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { info.FillAuthzContext(&authzContext) if node := info.Node; !f.allowNode(node, &authzContext) { f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, info.GetEnterpriseMeta())) + removed = true nd = append(nd[:i], nd[i+1:]...) 
i-- continue @@ -1490,6 +1493,7 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { continue } f.logger.Debug("dropping service from result due to ACLs", "service", svc) + removed = true info.Services = append(info.Services[:j], info.Services[j+1:]...) j-- } @@ -1502,17 +1506,21 @@ func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { continue } f.logger.Debug("dropping check from result due to ACLs", "check", chk.CheckID) + removed = true info.Checks = append(info.Checks[:j], info.Checks[j+1:]...) j-- } } *dump = nd + return removed } -// filterServiceDump is used to filter nodes based on ACL rules. -func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) { +// filterServiceDump is used to filter nodes based on ACL rules. Returns true +// if any elements were removed. +func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) bool { svcs := *services var authzContext acl.AuthorizerContext + var removed bool for i := 0; i < len(svcs); i++ { service := svcs[i] @@ -1530,10 +1538,12 @@ func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) { } f.logger.Debug("dropping service from result due to ACLs", "service", service.GatewayService.Service) + removed = true svcs = append(svcs[:i], svcs[i+1:]...) i-- } *services = svcs + return removed } // filterNodes is used to filter through all parts of a node list and remove @@ -1823,8 +1833,8 @@ func (f *aclFilter) filterServiceList(services *structs.ServiceList) bool { // filterGatewayServices is used to filter gateway to service mappings based on ACL rules. // Returns true if any elements were removed. func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) bool { - var removed bool ret := make(structs.GatewayServices, 0, len(*mappings)) + var removed bool for _, s := range *mappings { // This filter only checks ServiceRead on the linked service. // ServiceRead on the gateway is checked in the GatewayServices endpoint before filtering. @@ -1878,10 +1888,10 @@ func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, sub v.QueryMeta.ResultsFilteredByACLs = filt.filterIntentions(&v.Intentions) case *structs.IndexedNodeDump: - filt.filterNodeDump(&v.Dump) + v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeDump(&v.Dump) case *structs.IndexedServiceDump: - filt.filterServiceDump(&v.Dump) + v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceDump(&v.Dump) case *structs.IndexedNodes: v.QueryMeta.ResultsFilteredByACLs = filt.filterNodes(&v.Nodes) diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index c789819c4..8d159cbad 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -3126,107 +3126,105 @@ func TestACL_filterSessions(t *testing.T) { func TestACL_filterNodeDump(t *testing.T) { t.Parallel() - // Create a node dump. - fill := func() structs.NodeDump { - return structs.NodeDump{ - &structs.NodeInfo{ - Node: "node1", - Services: []*structs.NodeService{ - { - ID: "foo", - Service: "foo", + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedNodeDump { + return &structs.IndexedNodeDump{ + Dump: structs.NodeDump{ + { + Node: "node1", + Services: []*structs.NodeService{ + { + ID: "foo", + Service: "foo", + }, }, - }, - Checks: []*structs.HealthCheck{ - { - Node: "node1", - CheckID: "check1", - ServiceName: "foo", + Checks: []*structs.HealthCheck{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, }, }, }, } } - // Try permissive filtering. 
- { - dump := fill() - filt := newACLFilter(acl.AllowAll(), nil) - filt.filterNodeDump(&dump) - if len(dump) != 1 { - t.Fatalf("bad: %#v", dump) - } - if len(dump[0].Services) != 1 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 1 { - t.Fatalf("bad: %#v", dump[0].Checks) - } - } + t.Run("allowed", func(t *testing.T) { + require := require.New(t) - // Try restrictive filtering. - { - dump := fill() - filt := newACLFilter(acl.DenyAll(), nil) - filt.filterNodeDump(&dump) - if len(dump) != 0 { - t.Fatalf("bad: %#v", dump) - } - } + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) - // Allowed to see the service but not the node. - policy, err := acl.NewPolicyFromSource(` -service "foo" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) - // But the node will block it. - { - dump := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeDump(&dump) - if len(dump) != 0 { - t.Fatalf("bad: %#v", dump) - } - } + list := makeList() + filterACLWithAuthorizer(logger, authz, list) - // Chain on access to the node. - policy, err = acl.NewPolicyFromSource(` -node "node1" { - policy = "read" -} -`, acl.SyntaxLegacy, nil, nil) - if err != nil { - t.Fatalf("err %v", err) - } - perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + require.Len(list.Dump, 1) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) - // Now it should go through. 
- { - dump := fill() - filt := newACLFilter(perms, nil) - filt.filterNodeDump(&dump) - if len(dump) != 1 { - t.Fatalf("bad: %#v", dump) - } - if len(dump[0].Services) != 1 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 1 { - t.Fatalf("bad: %#v", dump[0].Checks) - } - } + t.Run("allowed to read the service, but not the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Dump, 1) + require.Empty(list.Dump[0].Services) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) } func TestACL_filterNodes(t *testing.T) { @@ -3257,6 +3255,277 @@ func TestACL_filterNodes(t *testing.T) { require.Len(nodes, 0) } +func TestACL_filterIndexedNodesWithGateways(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedNodesWithGateways { + return &structs.IndexedNodesWithGateways{ + Nodes: structs.CheckServiceNodes{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + ID: "foo", + Service: "foo", + }, + Checks: structs.HealthChecks{ + { + Node: "node1", + CheckID: "check1", + ServiceName: "foo", + }, + }, + }, + }, + Gateways: structs.GatewayServices{ + {Service: structs.ServiceNameFromString("foo")}, + {Service: structs.ServiceNameFromString("bar")}, + }, + } + } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + service "bar" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.Len(list.Gateways, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("not allowed to read the node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + service "bar" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list 
:= makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.Len(list.Gateways, 2) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("allowed to read the node, but not the service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "bar" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Nodes) + require.Len(list.Gateways, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to read the other gatway service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "read" + } + node "node1" { + policy = "read" + } + `, acl.SyntaxLegacy, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Nodes, 1) + require.Len(list.Gateways, 1) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("denied", func(t *testing.T) { + require := require.New(t) + + list := makeList() + filterACLWithAuthorizer(logger, acl.DenyAll(), list) + + require.Empty(list.Nodes) + require.Empty(list.Gateways) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + +func TestACL_filterIndexedServiceDump(t *testing.T) { + t.Parallel() + + logger := hclog.NewNullLogger() + + makeList := func() *structs.IndexedServiceDump { + return &structs.IndexedServiceDump{ + Dump: structs.ServiceDump{ + { + Node: &structs.Node{ + Node: "node1", + }, + Service: &structs.NodeService{ + Service: "foo", + }, + GatewayService: &structs.GatewayService{ + Service: structs.ServiceNameFromString("foo"), + Gateway: structs.ServiceNameFromString("foo-gateway"), + }, + }, + // No node information. 
+ { + GatewayService: &structs.GatewayService{ + Service: structs.ServiceNameFromString("bar"), + Gateway: structs.ServiceNameFromString("bar-gateway"), + }, + }, + }, + } + } + + t.Run("allowed", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service_prefix "foo" { + policy = "read" + } + service_prefix "bar" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Dump, 2) + require.False(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("not allowed to access node", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + service_prefix "foo" { + policy = "read" + } + service_prefix "bar" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Len(list.Dump, 1) + require.Equal("bar", list.Dump[0].GatewayService.Service.Name) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to access service", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "foo-gateway" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("not allowed to access gateway", func(t *testing.T) { + require := require.New(t) + + policy, err := acl.NewPolicyFromSource(` + node "node1" { + policy = "read" + } + service "foo" { + policy = "read" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(err) + + list := makeList() + filterACLWithAuthorizer(logger, authz, list) + + require.Empty(list.Dump) + require.True(list.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { t.Parallel() diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index e90d43200..14fbc4be4 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -72,18 +72,21 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, if err != nil { return err } - reply.Index, reply.Dump = index, dump - if err := m.srv.filterACL(args.Token, reply); err != nil { - return err - } raw, err := filter.Execute(reply.Dump) if err != nil { return err } - reply.Dump = raw.(structs.NodeDump) + + // Note: we filter the results with ACLs *after* applying the user-supplied + // bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include + // results that would be filtered out even if the user did have permission. 
+ if err := m.srv.filterACL(args.Token, reply); err != nil { + return err + } + return nil }) } @@ -114,10 +117,6 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. } reply.Nodes = nodes - if err := m.srv.filterACL(args.Token, &reply.Nodes); err != nil { - return err - } - // Get, store, and filter gateway services idx, gatewayServices, err := state.DumpGatewayServices(ws) if err != nil { diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index a1fd93427..7d293ad0e 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -461,7 +461,7 @@ func TestInternal_NodeInfo_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedNodeDump{} - if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &opt, &reply); err != nil { t.Fatalf("err: %s", err) } for _, info := range reply.Dump { @@ -492,6 +492,10 @@ func TestInternal_NodeInfo_FilterACL(t *testing.T) { } } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. This also proves // that we respect the version 8 ACL flag, since the test server sets @@ -515,7 +519,7 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) { QueryOptions: structs.QueryOptions{Token: token}, } reply := structs.IndexedNodeDump{} - if err := msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &opt, &reply); err != nil { + if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &opt, &reply); err != nil { t.Fatalf("err: %s", err) } for _, info := range reply.Dump { @@ -546,6 +550,10 @@ func TestInternal_NodeDump_FilterACL(t *testing.T) { } } + if !reply.QueryMeta.ResultsFilteredByACLs { + t.Fatal("ResultsFilteredByACLs should be true") + } + // We've already proven that we call the ACL filtering function so we // test node filtering down in acl.go for node cases. 
This also proves // that we respect the version 8 ACL flag, since the test server sets @@ -750,6 +758,217 @@ func TestInternal_ServiceDump_Kind(t *testing.T) { }) } +func TestInternal_ServiceDump_ACL(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + dir, s := testServerWithConfig(t, func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLMasterToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir) + defer s.Shutdown() + codec := rpcClient(t, s) + defer codec.Close() + + testrpc.WaitForLeader(t, s.RPC, "dc1") + + registrations := []*structs.RegisterRequest{ + // Service `redis` on `node1` + { + Datacenter: "dc1", + Node: "node1", + ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"), + Address: "192.18.1.1", + Service: &structs.NodeService{ + Kind: structs.ServiceKindTypical, + ID: "redis", + Service: "redis", + Port: 5678, + }, + Check: &structs.HealthCheck{ + Name: "redis check", + Status: api.HealthPassing, + ServiceID: "redis", + }, + }, + // Ingress gateway `igw` on `node2` + { + Datacenter: "dc1", + Node: "node2", + ID: types.NodeID("3a9d7530-20d4-443a-98d3-c10fe78f09f4"), + Address: "192.18.1.2", + Service: &structs.NodeService{ + Kind: structs.ServiceKindIngressGateway, + ID: "igw", + Service: "igw", + }, + Check: &structs.HealthCheck{ + Name: "igw check", + Status: api.HealthPassing, + ServiceID: "igw", + }, + }, + } + for _, reg := range registrations { + reg.Token = "root" + err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil) + require.NoError(t, err) + } + + { + req := structs.ConfigEntryRequest{ + Datacenter: "dc1", + Entry: &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "igw", + Listeners: []structs.IngressListener{ + { + Port: 8765, + Protocol: "tcp", + Services: []structs.IngressService{ + {Name: "redis"}, + }, + }, + }, + }, + } + req.Token = "root" + + var out bool + err := msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out) + require.NoError(t, err) + } + + tokenWithRules := func(t *testing.T, rules string) string { + t.Helper() + tok, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", rules) + require.NoError(t, err) + return tok.SecretID + } + + t.Run("can read all", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node_prefix "" { + policy = "read" + } + service_prefix "" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.NotEmpty(out.Nodes) + require.NotEmpty(out.Gateways) + require.False(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be false") + }) + + t.Run("cannot read service node", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node1" { + policy = "deny" + } + service "redis" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Nodes) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read service", func(t *testing.T) { + 
require := require.New(t) + + token := tokenWithRules(t, ` + node "node1" { + policy = "read" + } + service "redis" { + policy = "deny" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Nodes) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read gateway node", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node2" { + policy = "deny" + } + service "mgw" { + policy = "read" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Gateways) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) + + t.Run("cannot read gateway", func(t *testing.T) { + require := require.New(t) + + token := tokenWithRules(t, ` + node "node2" { + policy = "read" + } + service "mgw" { + policy = "deny" + } + `) + + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var out structs.IndexedNodesWithGateways + err := msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out) + require.NoError(err) + require.Empty(out.Gateways) + require.True(out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") + }) +} + func TestInternal_GatewayServiceDump_Terminating(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -1082,6 +1301,7 @@ func TestInternal_GatewayServiceDump_Terminating_ACL(t *testing.T) { require.Equal(t, nodes[0].Node.Node, "bar") require.Equal(t, nodes[0].Service.Service, "db") require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning) + require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true") } func TestInternal_GatewayServiceDump_Ingress(t *testing.T) { From 5baf5283c9fc2a993fd9d395d6ba4d389e69a744 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 16:55:19 -0700 Subject: [PATCH 18/60] Add changelog entry --- .changelog/11680.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11680.txt diff --git a/.changelog/11680.txt b/.changelog/11680.txt new file mode 100644 index 000000000..57617f706 --- /dev/null +++ b/.changelog/11680.txt @@ -0,0 +1,3 @@ +```release-note:improvement +server: block enterprise-specific partition-exports config entry from being used in OSS Consul. 
+``` \ No newline at end of file From d32bc117d878ba14b60ff12a9154eb554a87d802 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 00:14:50 -0700 Subject: [PATCH 19/60] Fix integ test --- test/integration/connect/envoy/run-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index bbeb7c423..ce507845a 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -352,7 +352,7 @@ function run_tests { start_consul secondary fi if is_set $REQUIRE_PARTITIONS; then - docker_consul "primary" admin-partition create -name ap1 > /dev/null + docker_consul "primary" consul partition create -name ap1 > /dev/null start_partitioned_client ap1 fi From 827b5cc55815152f30d51cbd0439d85a22a9f49b Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 00:37:21 -0700 Subject: [PATCH 20/60] Rename internal AdminPartition references This commit finishes replacing references to "AdminPartition" with "Partition". This now matches other uses in the codebase such as the CLI command, HTTP API, and the query parameter. --- api/partition.go | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/api/partition.go b/api/partition.go index daf211bfc..d849378b3 100644 --- a/api/partition.go +++ b/api/partition.go @@ -6,8 +6,8 @@ import ( "time" ) -// AdminPartition is the configuration of a single admin partition. Admin Partitions are a Consul Enterprise feature. -type AdminPartition struct { +// Partition is the configuration of a single admin partition. Admin Partitions are a Consul Enterprise feature. +type Partition struct { // Name is the name of the Partition. Name string `json:"Name"` @@ -29,10 +29,6 @@ type AdminPartition struct { // PartitionDefaultName is the default partition value. const PartitionDefaultName = "default" -type AdminPartitions struct { - Partitions []*AdminPartition -} - // Partitions can be used to manage Partitions in Consul Enterprise.. 
type Partitions struct { c *Client @@ -43,7 +39,7 @@ func (c *Client) Partitions() *Partitions { return &Partitions{c} } -func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *WriteOptions) (*AdminPartition, *WriteMeta, error) { +func (p *Partitions) Create(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { if partition.Name == "" { return nil, nil, fmt.Errorf("Must specify a Name for Partition creation") } @@ -62,7 +58,7 @@ func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *W } wm := &WriteMeta{RequestTime: rtt} - var out AdminPartition + var out Partition if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -70,7 +66,7 @@ func (p *Partitions) Create(ctx context.Context, partition *AdminPartition, q *W return &out, wm, nil } -func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *WriteOptions) (*AdminPartition, *WriteMeta, error) { +func (p *Partitions) Update(ctx context.Context, partition *Partition, q *WriteOptions) (*Partition, *WriteMeta, error) { if partition.Name == "" { return nil, nil, fmt.Errorf("Must specify a Name for Partition updating") } @@ -89,7 +85,7 @@ func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *W } wm := &WriteMeta{RequestTime: rtt} - var out AdminPartition + var out Partition if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -97,8 +93,8 @@ func (p *Partitions) Update(ctx context.Context, partition *AdminPartition, q *W return &out, wm, nil } -func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*AdminPartition, *QueryMeta, error) { - var out AdminPartition +func (p *Partitions) Read(ctx context.Context, name string, q *QueryOptions) (*Partition, *QueryMeta, error) { + var out Partition r := p.c.newRequest("GET", "/v1/partition/"+name) r.setQueryOptions(q) r.ctx = ctx @@ -143,8 +139,8 @@ func (p *Partitions) Delete(ctx context.Context, name string, q *WriteOptions) ( return wm, nil } -func (p *Partitions) List(ctx context.Context, q *QueryOptions) (*AdminPartitions, *QueryMeta, error) { - var out *AdminPartitions +func (p *Partitions) List(ctx context.Context, q *QueryOptions) ([]*Partition, *QueryMeta, error) { + var out []*Partition r := p.c.newRequest("GET", "/v1/partitions") r.setQueryOptions(q) r.ctx = ctx From 2dcb1f7e08a8d6ad5b3dd8c7f805b5633e95a199 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 12:52:41 -0700 Subject: [PATCH 21/60] Clarify feature name in partition docstring --- api/partition.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/partition.go b/api/partition.go index d849378b3..88edfb7b0 100644 --- a/api/partition.go +++ b/api/partition.go @@ -12,7 +12,7 @@ type Partition struct { Name string `json:"Name"` // Description is where the user puts any information they want - // about the partition. It is not used internally. + // about the admin partition. It is not used internally. Description string `json:"Description,omitempty"` // DeletedAt is the time when the Partition was marked for deletion @@ -29,7 +29,7 @@ type Partition struct { // PartitionDefaultName is the default partition value. const PartitionDefaultName = "default" -// Partitions can be used to manage Partitions in Consul Enterprise.. +// Partitions can be used to manage Partitions in Consul Enterprise. 
type Partitions struct { c *Client } From 768519813055dfa6ab2aaaed87ee5ce8a6a2f468 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 17:12:43 -0700 Subject: [PATCH 22/60] Add changelog entry --- .changelog/11737.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11737.txt diff --git a/.changelog/11737.txt b/.changelog/11737.txt new file mode 100644 index 000000000..4c4addb2a --- /dev/null +++ b/.changelog/11737.txt @@ -0,0 +1,3 @@ +```release-note:improvement +partitions: **(Enterprise only)** rename APIs, commands, and public types to use "partition" rather than "admin partition". +``` \ No newline at end of file From 97b4068137e47b75422a992f75594a0e8be3a74a Mon Sep 17 00:00:00 2001 From: freddygv Date: Tue, 30 Nov 2021 23:03:08 -0700 Subject: [PATCH 23/60] Update listener generation to account for consul VIP --- agent/xds/listeners.go | 17 +++++++++++++---- agent/xds/listeners_test.go | 3 ++- .../transparent-proxy.envoy-1-20-x.golden | 4 ++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 1b1292d64..4cb85aea0 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -168,13 +168,22 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. // We do not match on all endpoints here since it would lead to load balancing across // all instances when any instance address is dialed. for _, e := range endpoints { - if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { + if vip := e.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; vip.Address != "" { uniqueAddrs[vip.Address] = struct{}{} } + + // The virtualIPTag is used by consul-k8s to store the ClusterIP for a service. + // We only match on this virtual IP if the upstream is in the proxy's partition. + // This is because the IP is not guaranteed to be unique across k8s clusters. + if structs.EqualPartitions(e.Node.PartitionOrDefault(), cfgSnap.ProxyID.PartitionOrDefault()) { + if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" { + uniqueAddrs[vip.Address] = struct{}{} + } + } } - if len(uniqueAddrs) > 1 { - s.Logger.Warn("detected multiple virtual IPs for an upstream, all will be used to match traffic", - "upstream", id) + if len(uniqueAddrs) > 2 { + s.Logger.Debug("detected multiple virtual IPs for an upstream, all will be used to match traffic", + "upstream", id, "ip_count", len(uniqueAddrs)) } // For every potential address we collected, create the appropriate address prefix to match on. 
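To summarize the matching rule introduced above, here is a compact, hypothetical restatement of the address-collection loop (the tagged-address keys, EqualPartitions, and PartitionOrDefault come from the diff; the standalone function is illustrative and assumes it lives in agent/xds where the structs package and virtualIPTag are in scope):

// collectVirtualAddrs gathers the virtual IPs that transparent-proxy traffic
// may be matched on for one upstream.
func collectVirtualAddrs(endpoints structs.CheckServiceNodes, proxyPartition string) map[string]struct{} {
	uniqueAddrs := make(map[string]struct{})
	for _, e := range endpoints {
		// The Consul-assigned virtual IP is unique within the cluster, so it is
		// always safe to match on.
		if vip := e.Service.TaggedAddresses[structs.TaggedAddressVirtualIP]; vip.Address != "" {
			uniqueAddrs[vip.Address] = struct{}{}
		}
		// The consul-k8s ClusterIP (virtualIPTag) is only unique within one k8s
		// cluster, so it is matched only when the endpoint is in the proxy's own
		// partition.
		if structs.EqualPartitions(e.Node.PartitionOrDefault(), proxyPartition) {
			if vip := e.Service.TaggedAddresses[virtualIPTag]; vip.Address != "" {
				uniqueAddrs[vip.Address] = struct{}{}
			}
		}
	}
	return uniqueAddrs
}

Each collected address then becomes a /32 prefix match in the generated filter chain, which is what the golden-file change below reflects.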
diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index c082d41ea..acf197961 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -863,7 +863,8 @@ func TestListenersFromSnapshot(t *testing.T) { Address: "9.9.9.9", Port: 9090, TaggedAddresses: map[string]structs.ServiceAddress{ - "virtual": {Address: "10.0.0.1"}, + "virtual": {Address: "10.0.0.1"}, + structs.TaggedAddressVirtualIP: {Address: "240.0.0.1"}, }, }, }, diff --git a/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden b/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden index 6c6691c61..d390e3d9f 100644 --- a/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden +++ b/agent/xds/testdata/listeners/transparent-proxy.envoy-1-20-x.golden @@ -42,6 +42,10 @@ { "addressPrefix": "10.0.0.1", "prefixLen": 32 + }, + { + "addressPrefix": "240.0.0.1", + "prefixLen": 32 } ] }, From 142d8193e5f686eaaccdd1e1290c87c1a47b2747 Mon Sep 17 00:00:00 2001 From: freddygv Date: Wed, 1 Dec 2021 17:44:13 -0700 Subject: [PATCH 24/60] Add a new table to query service names by kind This table purposefully does not index by partition/namespace. It's a global view into all service names. This table is intended to replace the current serviceListTxn watch in intentionTopologyTxn. For cross-partition transparent proxying we need to be able to calculate upstreams from intentions in any partition. This means that the existing serviceListTxn function is insufficient since it's scoped to a partition. Moving away from that function is also beneficial because it watches the main "services" table, so watchers will wake up when any instance is registered or deregistered. --- agent/consul/fsm/snapshot_oss_test.go | 22 +++- agent/consul/state/catalog.go | 67 ++++++++++++ agent/consul/state/catalog_oss.go | 28 +++-- agent/consul/state/catalog_oss_test.go | 37 +++++++ agent/consul/state/catalog_schema.go | 78 ++++++++++++++ agent/consul/state/catalog_test.go | 137 +++++++++++++++++++++++++ agent/consul/state/schema.go | 1 + agent/consul/state/schema_test.go | 1 + agent/structs/structs.go | 9 ++ 9 files changed, 370 insertions(+), 10 deletions(-) diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 10e1cd061..652706865 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -464,6 +464,14 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, err) require.Equal(t, vip, "240.0.0.2") + _, serviceNames, err := fsm.state.ServiceNamesOfKind(nil, structs.ServiceKindTypical) + require.NoError(t, err) + + expect := []string{"backend", "db", "frontend", "web"} + for i, sn := range serviceNames { + require.Equal(t, expect[i], sn.Service.Name) + } + // Snapshot snap, err := fsm.Snapshot() require.NoError(t, err) @@ -690,10 +698,10 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Len(t, roots, 2) // Verify provider state is restored. - _, state, err := fsm2.state.CAProviderState("asdf") + _, provider, err := fsm2.state.CAProviderState("asdf") require.NoError(t, err) - require.Equal(t, "foo", state.PrivateKey) - require.Equal(t, "bar", state.RootCert) + require.Equal(t, "foo", provider.PrivateKey) + require.Equal(t, "bar", provider.RootCert) // Verify CA configuration is restored. 
_, caConf, err := fsm2.state.CAConfig(nil) @@ -751,6 +759,14 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, err) require.Equal(t, meshConfig, meshConfigEntry) + _, restoredServiceNames, err := fsm2.state.ServiceNamesOfKind(nil, structs.ServiceKindTypical) + require.NoError(t, err) + + expect = []string{"backend", "db", "frontend", "web"} + for i, sn := range restoredServiceNames { + require.Equal(t, expect[i], sn.Service.Name) + } + // Snapshot snap, err = fsm2.Snapshot() require.NoError(t, err) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index d0974c30e..896cbc1ee 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -741,6 +741,9 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { return fmt.Errorf("failed updating gateway mapping: %s", err) } + if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil { + return fmt.Errorf("failed to persist service name: %v", err) + } // Update upstream/downstream mappings if it's a connect service if svc.Kind == structs.ServiceKindConnectProxy || svc.Connect.Native { @@ -1691,6 +1694,9 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st if err := freeServiceVirtualIP(tx, svc.ServiceName, entMeta); err != nil { return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) } + if err := cleanupKindServiceName(tx, idx, svc.CompoundServiceName(), svc.ServiceKind); err != nil { + return fmt.Errorf("failed to persist service name: %v", err) + } } } else { return fmt.Errorf("Could not find any service %s: %s", svc.ServiceName, err) @@ -2526,6 +2532,26 @@ func (s *Store) VirtualIPForService(sn structs.ServiceName) (string, error) { return result.String(), nil } +func (s *Store) KindServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + var names []*KindServiceName + iter, err := tx.Get(tableKindServiceNames, indexKindOnly, kind) + if err != nil { + return 0, nil, err + } + ws.Add(iter.WatchCh()) + + idx := kindServiceNamesMaxIndex(tx, ws, kind) + for name := iter.Next(); name != nil; name = iter.Next() { + ksn := name.(*KindServiceName) + names = append(names, ksn) + } + + return idx, names, nil +} + // parseCheckServiceNodes is used to parse through a given set of services, // and query for an associated node and a set of checks. This is the inner // method used to return a rich set of results from a more simple query. @@ -3862,3 +3888,44 @@ func truncateGatewayServiceTopologyMappings(tx WriteTxn, idx uint64, gateway str return nil } + +func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, name structs.ServiceName) error { + q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} + existing, err := tx.First(tableKindServiceNames, indexID, q) + if err != nil { + return err + } + + // Service name is already known. Nothing to do. 
+ if existing != nil { + return nil + } + + ksn := KindServiceName{ + Kind: kind, + Service: name, + RaftIndex: structs.RaftIndex{ + CreateIndex: idx, + ModifyIndex: idx, + }, + } + if err := tx.Insert(tableKindServiceNames, &ksn); err != nil { + return fmt.Errorf("failed inserting %s/%s into %s: %s", kind, name.String(), tableKindServiceNames, err) + } + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { + return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) + } + return nil +} + +func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, kind structs.ServiceKind) error { + q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} + if _, err := tx.DeleteAll(tableKindServiceNames, indexID, q); err != nil { + return fmt.Errorf("failed to delete %s from %s: %s", name, tableKindServiceNames, err) + } + + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { + return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) + } + return nil +} diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go index e71d13ae3..f2902ca71 100644 --- a/agent/consul/state/catalog_oss.go +++ b/agent/consul/state/catalog_oss.go @@ -5,6 +5,7 @@ package state import ( "fmt" + "strings" memdb "github.com/hashicorp/go-memdb" @@ -18,13 +19,7 @@ func serviceIndexName(name string, _ *structs.EnterpriseMeta) string { } func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) string { - switch kind { - case structs.ServiceKindTypical: - // needs a special case here - return "service_kind.typical" - default: - return "service_kind." + string(kind) - } + return "service_kind." + kind.Normalized() } func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *structs.EnterpriseMeta) error { @@ -192,3 +187,22 @@ func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) ( func (s *Store) ValidateRegisterRequest(_ *structs.RegisterRequest) (*structs.EnterpriseMeta, error) { return nil, nil } + +func indexFromKindServiceName(arg interface{}) ([]byte, error) { + var b indexBuilder + + switch n := arg.(type) { + case KindServiceNameQuery: + b.String(strings.ToLower(string(n.Kind))) + b.String(strings.ToLower(n.Name)) + return b.Bytes(), nil + + case *KindServiceName: + b.String(strings.ToLower(string(n.Kind))) + b.String(strings.ToLower(n.Service.Name)) + return b.Bytes(), nil + + default: + return nil, fmt.Errorf("type must be KindServiceNameQuery or *KindServiceName: %T", arg) + } +} diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 04162072b..5811416b1 100644 --- a/agent/consul/state/catalog_oss_test.go +++ b/agent/consul/state/catalog_oss_test.go @@ -412,3 +412,40 @@ func testIndexerTableServiceVirtualIPs() map[string]indexerTestCase { }, } } + +func testIndexerTableKindServiceNames() map[string]indexerTestCase { + obj := &KindServiceName{ + Service: structs.ServiceName{ + Name: "web-sidecar-proxy", + }, + Kind: structs.ServiceKindConnectProxy, + } + + return map[string]indexerTestCase{ + indexID: { + read: indexValue{ + source: &KindServiceName{ + Service: structs.ServiceName{ + Name: "web-sidecar-proxy", + }, + Kind: structs.ServiceKindConnectProxy, + }, + expected: []byte("connect-proxy\x00web-sidecar-proxy\x00"), + }, + write: indexValue{ + source: obj, + expected: []byte("connect-proxy\x00web-sidecar-proxy\x00"), + }, + }, + indexKind: { + read: 
indexValue{ + source: structs.ServiceKindConnectProxy, + expected: []byte("connect-proxy\x00"), + }, + write: indexValue{ + source: obj, + expected: []byte("connect-proxy\x00"), + }, + }, + } +} diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index b67bf5049..c03f649be 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -19,6 +19,7 @@ const ( tableMeshTopology = "mesh-topology" tableServiceVirtualIPs = "service-virtual-ips" tableFreeVirtualIPs = "free-virtual-ips" + tableKindServiceNames = "kind-service-names" indexID = "id" indexService = "service" @@ -661,3 +662,80 @@ func freeVirtualIPTableSchema() *memdb.TableSchema { }, } } + +type KindServiceName struct { + Kind structs.ServiceKind + Service structs.ServiceName + + structs.RaftIndex +} + +func kindServiceNameTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tableKindServiceNames, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: indexFromKindServiceName, + writeIndex: indexFromKindServiceName, + }, + }, + indexKindOnly: { + Name: indexKindOnly, + AllowMissing: false, + Unique: false, + Indexer: indexerSingle{ + readIndex: indexFromKindServiceNameKindOnly, + writeIndex: indexFromKindServiceNameKindOnly, + }, + }, + }, + } +} + +// KindServiceNameQuery is used to lookup service names by kind or enterprise meta. +type KindServiceNameQuery struct { + Kind structs.ServiceKind + Name string + structs.EnterpriseMeta +} + +// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (q KindServiceNameQuery) NamespaceOrDefault() string { + return q.EnterpriseMeta.NamespaceOrDefault() +} + +// PartitionOrDefault exists because structs.EnterpriseMeta uses a pointer +// receiver for this method. Remove once that is fixed. +func (q KindServiceNameQuery) PartitionOrDefault() string { + return q.EnterpriseMeta.PartitionOrDefault() +} + +func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) { + switch x := raw.(type) { + case *KindServiceName: + var b indexBuilder + b.String(strings.ToLower(string(x.Kind))) + return b.Bytes(), nil + + case structs.ServiceKind: + var b indexBuilder + b.String(strings.ToLower(string(x))) + return b.Bytes(), nil + + default: + return nil, fmt.Errorf("type must be *KindServiceName or structs.ServiceKind: %T", raw) + } +} + +func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) uint64 { + return maxIndexWatchTxn(tx, ws, kindServiceNameIndexName(kind)) +} + +func kindServiceNameIndexName(kind structs.ServiceKind) string { + return "kind_service_names." + kind.Normalized() +} diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c95989b80..c4d7a775a 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -7656,6 +7656,143 @@ func TestProtocolForIngressGateway(t *testing.T) { } } +func TestStateStore_EnsureService_ServiceNames(t *testing.T) { + s := testStateStore(t) + + // Create the service registration. 
+ entMeta := structs.DefaultEnterpriseMetaInDefaultPartition() + + services := []structs.NodeService{ + { + Kind: structs.ServiceKindIngressGateway, + ID: "ingress-gateway", + Service: "ingress-gateway", + Address: "2.2.2.2", + Port: 2222, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindMeshGateway, + ID: "mesh-gateway", + Service: "mesh-gateway", + Address: "4.4.4.4", + Port: 4444, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindConnectProxy, + ID: "connect-proxy", + Service: "connect-proxy", + Address: "1.1.1.1", + Port: 1111, + Proxy: structs.ConnectProxyConfig{DestinationServiceName: "foo"}, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindTerminatingGateway, + ID: "terminating-gateway", + Service: "terminating-gateway", + Address: "3.3.3.3", + Port: 3333, + EnterpriseMeta: *entMeta, + }, + { + Kind: structs.ServiceKindTypical, + ID: "web", + Service: "web", + Address: "5.5.5.5", + Port: 5555, + EnterpriseMeta: *entMeta, + }, + } + + var idx uint64 + testRegisterNode(t, s, idx, "node1") + + for _, svc := range services { + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &svc)) + + // Ensure the service name was stored for all of them under the appropriate kind + gotIdx, gotNames, err := s.KindServiceNamesOfKind(nil, svc.Kind) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Len(t, gotNames, 1) + require.Equal(t, svc.CompoundServiceName(), gotNames[0].Service) + require.Equal(t, svc.Kind, gotNames[0].Kind) + } + + // Register another ingress gateway and there should be two names under the kind index + newIngress := structs.NodeService{ + Kind: structs.ServiceKindIngressGateway, + ID: "new-ingress-gateway", + Service: "new-ingress-gateway", + Address: "6.6.6.6", + Port: 6666, + EnterpriseMeta: *entMeta, + } + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &newIngress)) + + gotIdx, got, err := s.KindServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + + expect := []*KindServiceName{ + { + Kind: structs.ServiceKindIngressGateway, + Service: structs.NewServiceName("ingress-gateway", nil), + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + Kind: structs.ServiceKindIngressGateway, + Service: structs.NewServiceName("new-ingress-gateway", nil), + RaftIndex: structs.RaftIndex{ + CreateIndex: idx, + ModifyIndex: idx, + }, + }, + } + require.Equal(t, expect, got) + + // Deregister an ingress gateway and the index should not slide back + idx++ + require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta)) + + gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Equal(t, expect[:1], got) + + // Registering another instance of a known service should not bump the kind index + newMGW := structs.NodeService{ + Kind: structs.ServiceKindMeshGateway, + ID: "mesh-gateway-1", + Service: "mesh-gateway", + Address: "7.7.7.7", + Port: 7777, + EnterpriseMeta: *entMeta, + } + idx++ + require.NoError(t, s.EnsureService(idx, "node1", &newMGW)) + + gotIdx, _, err = s.KindServiceNamesOfKind(nil, structs.ServiceKindMeshGateway) + require.NoError(t, err) + require.Equal(t, uint64(2), gotIdx) + + // Deregister the single typical service and the service name should also be dropped + idx++ + require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta)) + + gotIdx, got, err = s.KindServiceNamesOfKind(nil, 
structs.ServiceKindTypical) + require.NoError(t, err) + require.Equal(t, idx, gotIdx) + require.Empty(t, got) +} + func runStep(t *testing.T, name string, fn func(t *testing.T)) { t.Helper() if !t.Run(name, fn) { diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go index 4005469fd..75a2ffa74 100644 --- a/agent/consul/state/schema.go +++ b/agent/consul/state/schema.go @@ -40,6 +40,7 @@ func newDBSchema() *memdb.DBSchema { tombstonesTableSchema, usageTableSchema, freeVirtualIPTableSchema, + kindServiceNameTableSchema, ) withEnterpriseSchema(db) return db diff --git a/agent/consul/state/schema_test.go b/agent/consul/state/schema_test.go index b83491587..7ef17c8fd 100644 --- a/agent/consul/state/schema_test.go +++ b/agent/consul/state/schema_test.go @@ -50,6 +50,7 @@ func TestNewDBSchema_Indexers(t *testing.T) { tableMeshTopology: testIndexerTableMeshTopology, tableGatewayServices: testIndexerTableGatewayServices, tableServiceVirtualIPs: testIndexerTableServiceVirtualIPs, + tableKindServiceNames: testIndexerTableKindServiceNames, // KV tableKVs: testIndexerTableKVs, tableTombstones: testIndexerTableTombstones, diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 6edbc5545..e79cf6ef9 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -72,6 +72,7 @@ const ( SystemMetadataRequestType = 31 ServiceVirtualIPRequestType = 32 FreeVirtualIPRequestType = 33 + KindServiceNamesType = 34 ) // if a new request type is added above it must be @@ -114,6 +115,7 @@ var requestTypeStrings = map[MessageType]string{ SystemMetadataRequestType: "SystemMetadata", ServiceVirtualIPRequestType: "ServiceVirtualIP", FreeVirtualIPRequestType: "FreeVirtualIP", + KindServiceNamesType: "KindServiceName", } const ( @@ -1029,6 +1031,13 @@ type ServiceNodes []*ServiceNode // ServiceKind is the kind of service being registered. type ServiceKind string +func (k ServiceKind) Normalized() string { + if k == ServiceKindTypical { + return "typical" + } + return string(k) +} + const ( // ServiceKindTypical is a typical, classic Consul service. This is // represented by the absence of a value. This was chosen for ease of From 4acbdc4618090df930be3dcc6df0fa8d02ec7da7 Mon Sep 17 00:00:00 2001 From: freddygv Date: Tue, 30 Nov 2021 23:50:14 -0700 Subject: [PATCH 25/60] Avoid updating default decision from wildcard ixn Given that we do not allow wildcard partitions in intentions, no one ixn can override the DefaultAllow setting. Only the default ACL policy applies across all partitions. 
--- agent/consul/state/intention.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 72850b29e..ed71f1049 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -995,19 +995,6 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, maxIdx = index } - // Check for a wildcard intention (* -> *) since it overrides the default decision from ACLs - if len(intentions) > 0 { - // Intentions with wildcard source and destination have the lowest precedence, so they are last in the list - ixn := intentions[len(intentions)-1] - - if ixn.HasWildcardSource() && ixn.HasWildcardDestination() { - defaultDecision = acl.Allow - if ixn.Action == structs.IntentionActionDeny { - defaultDecision = acl.Deny - } - } - } - index, allServices, err := serviceListTxn(tx, ws, func(svc *structs.ServiceNode) bool { // Only include ingress gateways as downstreams, since they cannot receive service mesh traffic // TODO(freddy): One remaining issue is that this includes non-Connect services (typical services without a proxy) From fcfed672468e6194e686a1cebdd8d2e751666966 Mon Sep 17 00:00:00 2001 From: freddygv Date: Thu, 2 Dec 2021 09:06:39 -0700 Subject: [PATCH 26/60] Update intention topology to use new table --- agent/consul/catalog_endpoint.go | 2 +- agent/consul/state/catalog.go | 20 ++++++++---------- agent/consul/state/catalog_test.go | 8 +++---- agent/consul/state/intention.go | 34 ++++++++++++++++++------------ 4 files changed, 35 insertions(+), 29 deletions(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index cfddeff18..b853b4aca 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -570,7 +570,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.ServiceList(ws, nil, &args.EnterpriseMeta) + index, services, err := state.ServiceList(ws, &args.EnterpriseMeta) if err != nil { return err } diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 896cbc1ee..31bef38e3 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -968,16 +968,14 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui return idx, results, nil } -func (s *Store) ServiceList(ws memdb.WatchSet, - include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { tx := s.db.Txn(false) defer tx.Abort() - return serviceListTxn(tx, ws, include, entMeta) + return serviceListTxn(tx, ws, entMeta) } -func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, - include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) { idx := catalogServicesMaxIndex(tx, entMeta) services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) @@ -989,11 +987,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, unique := make(map[structs.ServiceName]struct{}) for service := services.Next(); service != nil; service = services.Next() { svc := service.(*structs.ServiceNode) - // TODO (freddy) This is a hack 
to exclude certain kinds. - // Need a new index to query by kind and namespace, have to coordinate with consul foundations first - if include == nil || include(svc) { - unique[svc.CompoundServiceName()] = struct{}{} - } + unique[svc.CompoundServiceName()] = struct{}{} } results := make(structs.ServiceList, 0, len(unique)) @@ -2532,10 +2526,14 @@ func (s *Store) VirtualIPForService(sn structs.ServiceName) (string, error) { return result.String(), nil } -func (s *Store) KindServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { +func (s *Store) ServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { tx := s.db.Txn(false) defer tx.Abort() + return serviceNamesOfKindTxn(tx, ws, kind) +} + +func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { var names []*KindServiceName iter, err := tx.Get(tableKindServiceNames, indexKindOnly, kind) if err != nil { diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c4d7a775a..bfca9a2d9 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -7714,7 +7714,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { require.NoError(t, s.EnsureService(idx, "node1", &svc)) // Ensure the service name was stored for all of them under the appropriate kind - gotIdx, gotNames, err := s.KindServiceNamesOfKind(nil, svc.Kind) + gotIdx, gotNames, err := s.ServiceNamesOfKind(nil, svc.Kind) require.NoError(t, err) require.Equal(t, idx, gotIdx) require.Len(t, gotNames, 1) @@ -7734,7 +7734,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { idx++ require.NoError(t, s.EnsureService(idx, "node1", &newIngress)) - gotIdx, got, err := s.KindServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) + gotIdx, got, err := s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) require.NoError(t, err) require.Equal(t, idx, gotIdx) @@ -7779,7 +7779,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { idx++ require.NoError(t, s.EnsureService(idx, "node1", &newMGW)) - gotIdx, _, err = s.KindServiceNamesOfKind(nil, structs.ServiceKindMeshGateway) + gotIdx, _, err = s.ServiceNamesOfKind(nil, structs.ServiceKindMeshGateway) require.NoError(t, err) require.Equal(t, uint64(2), gotIdx) @@ -7787,7 +7787,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { idx++ require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta)) - gotIdx, got, err = s.KindServiceNamesOfKind(nil, structs.ServiceKindTypical) + gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindTypical) require.NoError(t, err) require.Equal(t, idx, gotIdx) require.Empty(t, got) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index ed71f1049..f2f64500f 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -995,23 +995,29 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, maxIdx = index } - index, allServices, err := serviceListTxn(tx, ws, func(svc *structs.ServiceNode) bool { - // Only include ingress gateways as downstreams, since they cannot receive service mesh traffic - // TODO(freddy): One remaining issue is that this includes non-Connect services (typical services without a proxy) - // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. - // Maybe start tracking services represented by proxies? 
(both sidecar and ingress) - if svc.ServiceKind == structs.ServiceKindTypical || (svc.ServiceKind == structs.ServiceKindIngressGateway && downstreams) { - return true - } - return false - }, target.WithWildcardNamespace()) + // TODO(tproxy): One remaining improvement is that this includes non-Connect services (typical services without a proxy) + // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. + // Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar-proxy, terminating) + index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical) if err != nil { - return index, nil, fmt.Errorf("failed to fetch catalog service list: %v", err) + return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } if index > maxIdx { maxIdx = index } + if downstreams { + // Ingress gateways can only ever be downstreams, since mesh services don't dial them. + index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway) + if err != nil { + return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) + } + if index > maxIdx { + maxIdx = index + } + services = append(services, ingress...) + } + // When checking authorization to upstreams, the match type for the decision is `destination` because we are deciding // if upstream candidates are covered by intentions that have the target service as a source. // The reverse is true for downstreams. @@ -1019,11 +1025,13 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, if downstreams { decisionMatchType = structs.IntentionMatchSource } - result := make([]ServiceWithDecision, 0, len(allServices)) - for _, candidate := range allServices { + result := make([]ServiceWithDecision, 0, len(services)) + for _, svc := range services { + candidate := svc.Service if candidate.Name == structs.ConsulServiceName { continue } + opts := IntentionDecisionOpts{ Target: candidate.Name, Namespace: candidate.NamespaceOrDefault(), From 478f532ba25612ceeaabc1a4f13ed9a26d83902b Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 17:31:42 -0700 Subject: [PATCH 27/60] Add changelog entry --- .changelog/11738.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11738.txt diff --git a/.changelog/11738.txt b/.changelog/11738.txt new file mode 100644 index 000000000..6584863e4 --- /dev/null +++ b/.changelog/11738.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: **(Enterprise only)** add support for cross-partition transparent proxying. +``` \ No newline at end of file From 02fb32365207655ddf9d5c0a2603a3a06daae3a8 Mon Sep 17 00:00:00 2001 From: freddygv Date: Thu, 2 Dec 2021 23:50:38 -0700 Subject: [PATCH 28/60] Rename partition-exports to exported-services Using a name less tied to partitions gives us more flexibility to use this config entry in OSS for exports between datacenters/meshes. 
--- acl/acl.go | 4 +-- agent/consul/acl.go | 4 +-- agent/consul/config_endpoint.go | 6 ++-- agent/consul/config_endpoint_test.go | 18 +++++----- agent/consul/config_replication.go | 2 +- agent/consul/state/config_entry.go | 2 +- .../usagemetrics/usagemetrics_oss_test.go | 24 ++++++------- agent/structs/config_entry.go | 8 ++--- agent/structs/config_entry_exports.go | 36 +++++++++---------- agent/structs/config_entry_test.go | 8 ++--- api/config_entry.go | 6 ++-- api/config_entry_exports.go | 28 +++++++-------- command/config/write/config_write_test.go | 12 +++---- .../config-entries/partition-exports.mdx | 30 ++++++++-------- .../docs/enterprise/admin-partitions.mdx | 2 +- website/content/docs/k8s/crds/index.mdx | 2 +- website/data/docs-nav-data.json | 2 +- 17 files changed, 97 insertions(+), 97 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index ff605ade4..d56383d9f 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -16,10 +16,10 @@ type Config struct { type ExportFetcher interface { // ExportsForPartition returns the config entry defining exports for a partition - ExportsForPartition(partition string) PartitionExports + ExportsForPartition(partition string) ExportedServices } -type PartitionExports struct { +type ExportedServices struct { Data map[string]map[string][]string } diff --git a/agent/consul/acl.go b/agent/consul/acl.go index e5ae2cdaf..db7d415a1 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1980,6 +1980,6 @@ func filterACL(r *ACLResolver, token string, subj interface{}) error { type partitionInfoNoop struct{} -func (p *partitionInfoNoop) ExportsForPartition(partition string) acl.PartitionExports { - return acl.PartitionExports{} +func (p *partitionInfoNoop) ExportsForPartition(partition string) acl.ExportedServices { + return acl.ExportedServices{} } diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index 6dca1a032..97a2c72a7 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -597,7 +597,7 @@ func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error { // Partition exports are gated from interactions from secondary DCs // because non-default partitions cannot be created in secondaries // and services cannot be exported to another datacenter. 
- if kind != structs.PartitionExports { + if kind != structs.ExportedServices { return nil } if localDC == "" { @@ -611,10 +611,10 @@ func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error { switch { case targetDC == "" && localDC != primaryDC: - return fmt.Errorf("partition-exports writes in secondary datacenters must target the primary datacenter explicitly.") + return fmt.Errorf("exported-services writes in secondary datacenters must target the primary datacenter explicitly.") case targetDC != "" && targetDC != primaryDC: - return fmt.Errorf("partition-exports writes must not target secondary datacenters.") + return fmt.Errorf("exported-services writes must not target secondary datacenters.") } return nil diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 81c3a2b15..f247cf4b9 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -2093,7 +2093,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "", localDC: "dc1", primaryDC: "", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, }, { @@ -2102,7 +2102,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "", localDC: "dc1", primaryDC: "dc1", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, }, { @@ -2111,7 +2111,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "dc1", localDC: "dc1", primaryDC: "dc1", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, }, { @@ -2120,7 +2120,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "dc2", localDC: "dc1", primaryDC: "", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, wantErr: "writes must not target secondary datacenters", }, @@ -2130,7 +2130,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "dc2", localDC: "dc1", primaryDC: "dc1", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, wantErr: "writes must not target secondary datacenters", }, @@ -2140,7 +2140,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "dc2", localDC: "dc2", primaryDC: "dc1", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, wantErr: "writes must not target secondary datacenters", }, @@ -2150,7 +2150,7 @@ func Test_gateWriteToSecondary(t *testing.T) { targetDC: "", localDC: "dc2", primaryDC: "dc1", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, wantErr: "must target the primary datacenter explicitly", }, @@ -2158,7 +2158,7 @@ func Test_gateWriteToSecondary(t *testing.T) { name: "empty local DC", args: args{ localDC: "", - kind: structs.PartitionExports, + kind: structs.ExportedServices, }, wantErr: "unknown local datacenter", }, @@ -2179,7 +2179,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) { } for _, kind := range structs.AllConfigEntryKinds { - if kind == structs.PartitionExports { + if kind == structs.ExportedServices { continue } diff --git a/agent/consul/config_replication.go b/agent/consul/config_replication.go index e5cb8e533..8c9c1377b 100644 --- a/agent/consul/config_replication.go +++ b/agent/consul/config_replication.go @@ -93,7 +93,7 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con for i, entry := range configs { // Partition exports only apply to the primary datacenter. 
- if entry.GetKind() == structs.PartitionExports { + if entry.GetKind() == structs.ExportedServices { continue } req := structs.ConfigEntryRequest{ diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index e4b7b9f14..594e49e1a 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -395,7 +395,7 @@ func validateProposedConfigEntryInGraph( } case structs.ServiceIntentions: case structs.MeshConfig: - case structs.PartitionExports: + case structs.ExportedServices: default: return fmt.Errorf("unhandled kind %q during validation of %q", kindName.Kind, kindName.Name) } diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go index 4d90a84a9..5ab34256f 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss_test.go +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -178,12 +178,12 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -363,12 +363,12 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -576,12 +576,12 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -803,12 +803,12 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, @@ -1007,12 +1007,12 @@ func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, 
+ {Name: "kind", Value: "exported-services"}, }, }, }, @@ -1201,12 +1201,12 @@ func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { {Name: "kind", Value: "terminating-gateway"}, }, }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=partition-exports": { + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { Name: "consul.usage.test.consul.state.config_entries", Value: 0, Labels: []metrics.Label{ {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "partition-exports"}, + {Name: "kind", Value: "exported-services"}, }, }, }, diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index a7703c45a..3ea7c18f3 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -27,7 +27,7 @@ const ( TerminatingGateway string = "terminating-gateway" ServiceIntentions string = "service-intentions" MeshConfig string = "mesh" - PartitionExports string = "partition-exports" + ExportedServices string = "exported-services" ProxyConfigGlobal string = "global" MeshConfigMesh string = "mesh" @@ -45,7 +45,7 @@ var AllConfigEntryKinds = []string{ TerminatingGateway, ServiceIntentions, MeshConfig, - PartitionExports, + ExportedServices, } // ConfigEntry is the interface for centralized configuration stored in Raft. @@ -533,8 +533,8 @@ func MakeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceIntentionsConfigEntry{Name: name}, nil case MeshConfig: return &MeshConfigEntry{}, nil - case PartitionExports: - return &PartitionExportsConfigEntry{Name: name}, nil + case ExportedServices: + return &ExportedServicesConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 7b9d7cfb8..4f11a4b7b 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -7,9 +7,9 @@ import ( "github.com/hashicorp/consul/acl" ) -// PartitionExportsConfigEntry is the top-level struct for exporting a service to be exposed +// ExportedServicesConfigEntry is the top-level struct for exporting a service to be exposed // across other admin partitions. 
-type PartitionExportsConfigEntry struct { +type ExportedServicesConfigEntry struct { Name string // Services is a list of services to be exported and the list of partitions @@ -40,7 +40,7 @@ type ServiceConsumer struct { Partition string } -func (e *PartitionExportsConfigEntry) ToMap() map[string]map[string][]string { +func (e *ExportedServicesConfigEntry) ToMap() map[string]map[string][]string { resp := make(map[string]map[string][]string) for _, svc := range e.Services { if _, ok := resp[svc.Namespace]; !ok { @@ -57,7 +57,7 @@ func (e *PartitionExportsConfigEntry) ToMap() map[string]map[string][]string { return resp } -func (e *PartitionExportsConfigEntry) Clone() *PartitionExportsConfigEntry { +func (e *ExportedServicesConfigEntry) Clone() *ExportedServicesConfigEntry { e2 := *e e2.Services = make([]ExportedService, len(e.Services)) for _, svc := range e.Services { @@ -72,11 +72,11 @@ func (e *PartitionExportsConfigEntry) Clone() *PartitionExportsConfigEntry { return &e2 } -func (e *PartitionExportsConfigEntry) GetKind() string { - return PartitionExports +func (e *ExportedServicesConfigEntry) GetKind() string { + return ExportedServices } -func (e *PartitionExportsConfigEntry) GetName() string { +func (e *ExportedServicesConfigEntry) GetName() string { if e == nil { return "" } @@ -84,14 +84,14 @@ func (e *PartitionExportsConfigEntry) GetName() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetMeta() map[string]string { +func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { if e == nil { return nil } return e.Meta } -func (e *PartitionExportsConfigEntry) Normalize() error { +func (e *ExportedServicesConfigEntry) Normalize() error { if e == nil { return fmt.Errorf("config entry is nil") } @@ -105,12 +105,12 @@ func (e *PartitionExportsConfigEntry) Normalize() error { return nil } -func (e *PartitionExportsConfigEntry) Validate() error { +func (e *ExportedServicesConfigEntry) Validate() error { if e.Name == "" { return fmt.Errorf("Name is required") } if e.Name == WildcardSpecifier { - return fmt.Errorf("partition-exports Name must be the name of a partition, and not a wildcard") + return fmt.Errorf("exported-services Name must be the name of a partition, and not a wildcard") } if err := requireEnterprise(e.GetKind()); err != nil { @@ -136,19 +136,19 @@ func (e *PartitionExportsConfigEntry) Validate() error { return nil } -func (e *PartitionExportsConfigEntry) CanRead(authz acl.Authorizer) bool { +func (e *ExportedServicesConfigEntry) CanRead(authz acl.Authorizer) bool { var authzContext acl.AuthorizerContext e.FillAuthzContext(&authzContext) return authz.MeshRead(&authzContext) == acl.Allow } -func (e *PartitionExportsConfigEntry) CanWrite(authz acl.Authorizer) bool { +func (e *ExportedServicesConfigEntry) CanWrite(authz acl.Authorizer) bool { var authzContext acl.AuthorizerContext e.FillAuthzContext(&authzContext) return authz.MeshWrite(&authzContext) == acl.Allow } -func (e *PartitionExportsConfigEntry) GetRaftIndex() *RaftIndex { +func (e *ExportedServicesConfigEntry) GetRaftIndex() *RaftIndex { if e == nil { return &RaftIndex{} } @@ -156,7 +156,7 @@ func (e *PartitionExportsConfigEntry) GetRaftIndex() *RaftIndex { return &e.RaftIndex } -func (e *PartitionExportsConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { +func (e *ExportedServicesConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { if e == nil { return nil } @@ -168,13 +168,13 @@ func (e *PartitionExportsConfigEntry) GetEnterpriseMeta() *EnterpriseMeta { // correct type. 
// This method is implemented on the structs type (as apposed to the api type) // because that is what the API currently uses to return a response. -func (e *PartitionExportsConfigEntry) MarshalJSON() ([]byte, error) { - type Alias PartitionExportsConfigEntry +func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) { + type Alias ExportedServicesConfigEntry source := &struct { Kind string *Alias }{ - Kind: PartitionExports, + Kind: ExportedServices, Alias: (*Alias)(e), } return json.Marshal(source) diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index 294c9e40b..febe75012 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -1665,9 +1665,9 @@ func TestDecodeConfigEntry(t *testing.T) { }, }, { - name: "partition-exports", + name: "exported-services", snake: ` - kind = "partition-exports" + kind = "exported-services" name = "foo" meta { "foo" = "bar" @@ -1698,7 +1698,7 @@ func TestDecodeConfigEntry(t *testing.T) { ] `, camel: ` - Kind = "partition-exports" + Kind = "exported-services" Name = "foo" Meta { "foo" = "bar" @@ -1728,7 +1728,7 @@ func TestDecodeConfigEntry(t *testing.T) { } ] `, - expect: &PartitionExportsConfigEntry{ + expect: &ExportedServicesConfigEntry{ Name: "foo", Meta: map[string]string{ "foo": "bar", diff --git a/api/config_entry.go b/api/config_entry.go index f5fbbbce4..91c407bb5 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -22,7 +22,7 @@ const ( TerminatingGateway string = "terminating-gateway" ServiceIntentions string = "service-intentions" MeshConfig string = "mesh" - PartitionExports string = "partition-exports" + ExportedServices string = "exported-services" ProxyConfigGlobal string = "global" MeshConfigMesh string = "mesh" @@ -277,8 +277,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil case MeshConfig: return &MeshConfigEntry{}, nil - case PartitionExports: - return &PartitionExportsConfigEntry{Name: name}, nil + case ExportedServices: + return &ExportedServicesConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/api/config_entry_exports.go b/api/config_entry_exports.go index 0b6650107..ae9cb2ff6 100644 --- a/api/config_entry_exports.go +++ b/api/config_entry_exports.go @@ -2,14 +2,14 @@ package api import "encoding/json" -// PartitionExportsConfigEntry manages the exported services for a single admin partition. +// ExportedServicesConfigEntry manages the exported services for a single admin partition. // Admin Partitions are a Consul Enterprise feature. -type PartitionExportsConfigEntry struct { - // Name is the name of the partition the PartitionExportsConfigEntry applies to. +type ExportedServicesConfigEntry struct { + // Name is the name of the partition the ExportedServicesConfigEntry applies to. // Partitioning is a Consul Enterprise feature. Name string `json:",omitempty"` - // Partition is the partition where the PartitionExportsConfigEntry is stored. + // Partition is the partition where the ExportedServicesConfigEntry is stored. // If the partition does not match the name, the name will overwrite the partition. // Partitioning is a Consul Enterprise feature. 
Partition string `json:",omitempty"` @@ -49,23 +49,23 @@ type ServiceConsumer struct { Partition string } -func (e *PartitionExportsConfigEntry) GetKind() string { return PartitionExports } -func (e *PartitionExportsConfigEntry) GetName() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetPartition() string { return e.Name } -func (e *PartitionExportsConfigEntry) GetNamespace() string { return IntentionDefaultNamespace } -func (e *PartitionExportsConfigEntry) GetMeta() map[string]string { return e.Meta } -func (e *PartitionExportsConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *PartitionExportsConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } +func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } +func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } +func (e *ExportedServicesConfigEntry) GetNamespace() string { return IntentionDefaultNamespace } +func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } // MarshalJSON adds the Kind field so that the JSON can be decoded back into the // correct type. -func (e *PartitionExportsConfigEntry) MarshalJSON() ([]byte, error) { - type Alias PartitionExportsConfigEntry +func (e *ExportedServicesConfigEntry) MarshalJSON() ([]byte, error) { + type Alias ExportedServicesConfigEntry source := &struct { Kind string *Alias }{ - Kind: PartitionExports, + Kind: ExportedServices, Alias: (*Alias)(e), } return json.Marshal(source) diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index 5e145b51f..fc80e3b98 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -2722,9 +2722,9 @@ func TestParseConfigEntry(t *testing.T) { }, }, { - name: "partition-exports", + name: "exported-services", snake: ` - kind = "partition-exports" + kind = "exported-services" name = "foo" meta { "foo" = "bar" @@ -2755,7 +2755,7 @@ func TestParseConfigEntry(t *testing.T) { ] `, camel: ` - Kind = "partition-exports" + Kind = "exported-services" Name = "foo" Meta { "foo" = "bar" @@ -2787,7 +2787,7 @@ func TestParseConfigEntry(t *testing.T) { `, snakeJSON: ` { - "kind": "partition-exports", + "kind": "exported-services", "name": "foo", "meta": { "foo": "bar", @@ -2820,7 +2820,7 @@ func TestParseConfigEntry(t *testing.T) { `, camelJSON: ` { - "Kind": "partition-exports", + "Kind": "exported-services", "Name": "foo", "Meta": { "foo": "bar", @@ -2851,7 +2851,7 @@ func TestParseConfigEntry(t *testing.T) { ] } `, - expect: &api.PartitionExportsConfigEntry{ + expect: &api.ExportedServicesConfigEntry{ Name: "foo", Meta: map[string]string{ "foo": "bar", diff --git a/website/content/docs/connect/config-entries/partition-exports.mdx b/website/content/docs/connect/config-entries/partition-exports.mdx index 94a61569d..2e7bed5c1 100644 --- a/website/content/docs/connect/config-entries/partition-exports.mdx +++ b/website/content/docs/connect/config-entries/partition-exports.mdx @@ -2,21 +2,21 @@ layout: docs page_title: 'Configuration Entry Kind: Partition Exports' description: >- - The partition-exports configuration entry enables you to export services from a single file. 
+ The exported-services configuration entry enables you to export services from a single file. Settings in this configuration entry can apply to services in any namespace of the specified partition. Write access to the mesh resource is required. --- # Partition Exports -This topic describes the `partition-exports` configuration entry type. The `partition-exports` configuration entry enables Consul to export service instances to other admin partitions from a single file. This enables your services to be networked across admin partitions. See [Admin Partitions](/docs/enterprise/admin-partitions) for additional information. +This topic describes the `exported-services` configuration entry type. The `exported-services` configuration entry enables Consul to export service instances to other admin partitions from a single file. This enables your services to be networked across admin partitions. See [Admin Partitions](/docs/enterprise/admin-partitions) for additional information. -> **v1.11.0+:** This config entry is supported in Consul versions 1.11.0+. ## Introduction -You can configure Consul to export services contained in an admin partition to one or more additional partitions by declaring the `partition-exports` configuration entry in the `kind` field. This enables you to route traffic between services in different clusters that share a single set of Consul servers. +You can configure Consul to export services contained in an admin partition to one or more additional partitions by declaring the `exported-services` configuration entry in the `kind` field. This enables you to route traffic between services in different clusters that share a single set of Consul servers. -You can configure the settings defined in the `partition-exports` configuration entry to apply to all namespaces and federated datacenters. +You can configure the settings defined in the `exported-services` configuration entry to apply to all namespaces and federated datacenters. ## Requirements @@ -27,20 +27,20 @@ You can configure the settings defined in the `partition-exports` configuration ## Usage 1. Verify that your datacenter meets the conditions specified in the [Requirements](#requirements). -1. Specify the `partition-exports` configuration in the agent configuration file (see [`config_entries`](/docs/agent/options#config_entries)) as described in [Configuration](#configuration). +1. Specify the `exported-services` configuration in the agent configuration file (see [`config_entries`](/docs/agent/options#config_entries)) as described in [Configuration](#configuration). 1. Apply the configuration using one of the following methods: * Kubernetes CRD: Refer to the [Custom Resource Definitions](/docs/k8s/crds) documentation for details. * Issue the `consul config write` command: Refer to the [Consul Config Write](/commands/config/write) documentation for details. 
## Configuration -Configure the following parameters to define a `partition-exports` configuration entry: +Configure the following parameters to define a `exported-services` configuration entry: ```hcl -Kind = "partition-exports" +Kind = "exported-services" Partition = "" Services = [ { @@ -59,7 +59,7 @@ Services = [ ```yaml apiVersion: consul.hashicorp.com/v1alpha1 -Kind: PartitionExports +Kind: ExportedServices Partition: Services: - Consumers: @@ -71,7 +71,7 @@ Services: ```json -"Kind": "partition-exports", +"Kind": "exported-services", "Partition": "", "Services": [ { @@ -90,11 +90,11 @@ Services: ### Configuration Parameters -The following table describes the parameters associated with the `partition-exports` configuration entry. +The following table describes the parameters associated with the `exported-services` configuration entry. | Parameter | Description | Required | Default | | --- | --- | --- | --- | -| `Kind` | String value that enables the configuration entry. The value should always be `partition-exports` (HCL and JSON) or `PartitionExports` (YAML) | Required | None | +| `Kind` | String value that enables the configuration entry. The value should always be `exported-services` (HCL and JSON) or `ExportedServices` (YAML) | Required | None | | `Partition` | String value that specifies the name of the partition that contains the services you want to export. | Required | None | | `Services` | List of objects that specify which services to export. See [`Services`](#services) for details. | Required | None| | `Meta` | Object that defines a map of the max 64 key/value pairs. | Optional | None | @@ -115,7 +115,7 @@ The following example configures the agent to export the `billing` service from ```hcl -Kind = "partition-exports" +Kind = "exported-services" Partition = "finance" Services = [ @@ -147,7 +147,7 @@ Services = [ ```yaml -Kind: partition-exports +Kind: exported-services Partition: finance Services: - Consumers: @@ -165,7 +165,7 @@ Services: ```json -"Kind": "partition-exports", +"Kind": "exported-services", "Partition": "finance", "Services": [ { @@ -207,4 +207,4 @@ An ACL token with `service:write` permissions is required for the partition from Exports are available to all services in the consumer partition. In the previous example, any service with `write` permissions for the `frontend` partition will be able to read exports. -See [Health HTTP Endpoint](/api-docs/health) for additional information. \ No newline at end of file +See [Health HTTP Endpoint](/api-docs/health) for additional information. diff --git a/website/content/docs/enterprise/admin-partitions.mdx b/website/content/docs/enterprise/admin-partitions.mdx index b00ac189f..cb613d1ab 100644 --- a/website/content/docs/enterprise/admin-partitions.mdx +++ b/website/content/docs/enterprise/admin-partitions.mdx @@ -55,7 +55,7 @@ Values specified for [`proxy-defaults`](/docs/connect/config-entries/proxy-defau ### Cross-partition Networking -You can configure services to be discoverable and accessible by downstream services in any partition within the datacenter. Specify the upstream services that you want to be available for discovery by configuring the `partition-exports` configuration entry in the partition where the services are registered. Refer to the [`partition-exports` documentation](/docs/connect/config-entries/partition-exports) for details. +You can configure services to be discoverable and accessible by downstream services in any partition within the datacenter. 
Specify the upstream services that you want to be available for discovery by configuring the `exported-services` configuration entry in the partition where the services are registered. Refer to the [`exported-services` documentation](/docs/connect/config-entries/exported-services) for details. Additionally, the `upstreams` configuration for proxies in the source partition must specify the name of the destination partition so that listeners can be created. Refer to the [Upstream Configuration Reference](/docs/connect/registration/service-registration#upstream-configuration-reference) for additional information. diff --git a/website/content/docs/k8s/crds/index.mdx b/website/content/docs/k8s/crds/index.mdx index 8364c22d3..1c8eded43 100644 --- a/website/content/docs/k8s/crds/index.mdx +++ b/website/content/docs/k8s/crds/index.mdx @@ -23,7 +23,7 @@ via Kubernetes Custom Resources. Configuration entries provide cluster-wide defa You can specify the following values in the `kind` field. Click on a configuration entry to view its documentation: - [`Mesh`](/docs/connect/config-entries/mesh) (requires Consul 1.10.0+) -- [`PartitionExports`](/docs/connect/config-entries/partition-exports) +- [`ExportedServices`](/docs/connect/config-entries/exported-services) - [`ProxyDefaults`](/docs/connect/config-entries/proxy-defaults) - [`ServiceDefaults`](/docs/connect/config-entries/service-defaults) - [`ServiceSplitter`](/docs/connect/config-entries/service-splitter) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index c081d0fbf..1d0a3fb6c 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -152,7 +152,7 @@ }, { "title": "Partition Exports", - "path": "connect/config-entries/partition-exports" + "path": "connect/config-entries/exported-services" }, { "title": "Proxy Defaults", From 32a20120594bcc3694b64178972aa9d2c0c50143 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 17:46:20 -0700 Subject: [PATCH 29/60] Move exported-services docs based on new name --- .../{partition-exports.mdx => exported-services.mdx} | 0 website/redirects.next.js | 5 +++++ 2 files changed, 5 insertions(+) rename website/content/docs/connect/config-entries/{partition-exports.mdx => exported-services.mdx} (100%) diff --git a/website/content/docs/connect/config-entries/partition-exports.mdx b/website/content/docs/connect/config-entries/exported-services.mdx similarity index 100% rename from website/content/docs/connect/config-entries/partition-exports.mdx rename to website/content/docs/connect/config-entries/exported-services.mdx diff --git a/website/redirects.next.js b/website/redirects.next.js index 4062050f3..4b8905597 100644 --- a/website/redirects.next.js +++ b/website/redirects.next.js @@ -86,6 +86,11 @@ module.exports = [ destination: '/docs/connect/registration/sidecar-service', permanent: true, }, + { + source: '/docs/connect/config-entries/partition-exports', + destination: '/docs/connect/config-entries/exported-services', + permanent: true, + }, { source: '/docs/enterprise/license', destination: '/docs/enterprise/license/overview', From 9e05c21c7aa22f3fc505dd2c373976c5a9cbc988 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 3 Dec 2021 17:50:40 -0700 Subject: [PATCH 30/60] Add changelog entry --- .changelog/11739.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11739.txt diff --git a/.changelog/11739.txt b/.changelog/11739.txt new file mode 100644 index 000000000..6040dddf7 --- /dev/null +++ b/.changelog/11739.txt @@ -0,0 
+1,3 @@
+```release-note:improvement
+api: **(Enterprise Only)** rename partition-exports config entry to exported-services.
+```
\ No newline at end of file

From 78a008daf642a6c562417ff6688b0fa84e3859f1 Mon Sep 17 00:00:00 2001
From: Mike Morris
Date: Fri, 3 Dec 2021 20:17:55 -0500
Subject: [PATCH 31/60] types: add types/tls.go for strongly-typed TLS versions and cipher suites (#11645)

types: add TLS constants
types: distinguish between human and Envoy serialization for TLSVersion constants
types: add DeprecatedAgentTLSVersions for backwards compatibility
types: add methods for printing TLSVersion as strings
types: add TLSVersionInvalid error value
types: add a basic test for TLSVersion comparison
types: add TLS cipher suite mapping using IANA constant names and values
types: adding ConsulAutoConfigTLSVersionStrings
changelog: add entry for TLSVersion and TLSCipherSuite types
types: initialize TLSVersion constants starting at zero
types: remove TLSVersionInvalid < 0 test
types: update note for ConsulAutoConfigTLSVersionStrings
types: programmatically invert TLSCipherSuites for HumanTLSCipherSuiteStrings lookup map
Co-authored-by: Dan Upton
types: add test for TLSVersion zero-value
types: remove unused EnvoyTLSVersionStrings
types: implement MarshalJSON for TLSVersion
types: implement TLSVersionUnspecified as zero value
types: delegate TLS.MarshalJSON to json.Marshal, use ConsulConfigTLSVersionStrings as default String() values
Co-authored-by: Dan Upton
---
 .changelog/11645.txt | 3 +
 types/tls.go | 185 +++++++++++++++++++++++++++++++++++++++++++
 types/tls_test.go | 49 ++++++++++++
 3 files changed, 237 insertions(+)
 create mode 100644 .changelog/11645.txt
 create mode 100644 types/tls.go
 create mode 100644 types/tls_test.go

diff --git a/.changelog/11645.txt b/.changelog/11645.txt
new file mode 100644
index 000000000..845795327
--- /dev/null
+++ b/.changelog/11645.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+types: add TLSVersion and TLSCipherSuite
+```
diff --git a/types/tls.go b/types/tls.go
new file mode 100644
index 000000000..66c10b19b
--- /dev/null
+++ b/types/tls.go
@@ -0,0 +1,185 @@
+package types
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// TLSVersion is a strongly-typed int used for relative comparison
+// (minimum, maximum, greater than, less than) of TLS versions
+type TLSVersion int
+
+const (
+	// Error value, excluded from lookup maps
+	TLSVersionInvalid TLSVersion = iota - 1
+
+	// Explicit unspecified zero-value to avoid overwriting parent defaults
+	TLSVersionUnspecified
+
+	// Explicitly allow implementation to select TLS version
+	// May be useful to supersede defaults specified at a higher layer
+	TLSVersionAuto
+
+	_ // Placeholder for SSLv3, hopefully we won't have to add this
+
+	// TLS versions
+	TLSv1_0
+	TLSv1_1
+	TLSv1_2
+	TLSv1_3
+)
+
+var (
+	TLSVersions = map[string]TLSVersion{
+		"TLS_AUTO": TLSVersionAuto,
+		"TLSv1_0":  TLSv1_0,
+		"TLSv1_1":  TLSv1_1,
+		"TLSv1_2":  TLSv1_2,
+		"TLSv1_3":  TLSv1_3,
+	}
+	// NOTE: This interface is deprecated in favor of TLSVersions
+	// and should be eventually removed in a future release.
+ DeprecatedConsulAgentTLSVersions = map[string]TLSVersion{ + "": TLSVersionAuto, + "tls10": TLSv1_0, + "tls11": TLSv1_1, + "tls12": TLSv1_2, + "tls13": TLSv1_3, + } + HumanTLSVersionStrings = map[TLSVersion]string{ + TLSVersionAuto: "Allow implementation to select TLS version", + TLSv1_0: "TLS 1.0", + TLSv1_1: "TLS 1.1", + TLSv1_2: "TLS 1.2", + TLSv1_3: "TLS 1.3", + } + ConsulConfigTLSVersionStrings = func() map[TLSVersion]string { + inverted := make(map[TLSVersion]string, len(TLSVersions)) + for k, v := range TLSVersions { + inverted[v] = k + } + return inverted + }() + // NOTE: these currently map to the deprecated config strings to support the + // deployment pattern of upgrading servers first. This map should eventually + // be removed and any lookups updated to use ConsulConfigTLSVersionStrings + // with newer config strings instead in a future release. + ConsulAutoConfigTLSVersionStrings = map[TLSVersion]string{ + TLSVersionAuto: "", + TLSv1_0: "tls10", + TLSv1_1: "tls11", + TLSv1_2: "tls12", + TLSv1_3: "tls13", + } +) + +func (v TLSVersion) String() string { + return ConsulConfigTLSVersionStrings[v] +} + +func (v TLSVersion) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *TLSVersion) UnmarshalJSON(bytes []byte) error { + versionStr := string(bytes) + + if n := len(versionStr); n > 1 && versionStr[0] == '"' && versionStr[n-1] == '"' { + versionStr = versionStr[1 : n-1] // trim surrounding quotes + } + + if version, ok := TLSVersions[versionStr]; ok { + *v = version + return nil + } + + *v = TLSVersionInvalid + return fmt.Errorf("no matching TLS Version found for %s", versionStr) +} + +// IANA cipher suite constants and values as defined at +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml +// This is the total list of TLS 1.2-style cipher suites +// which are currently supported by either Envoy 1.21 or the Consul agent +// via Go, and may change as some older suites are removed in future +// Envoy releases and Consul drops support for older Envoy versions, +// and as supported cipher suites in the Go runtime change. 
+//
+// The naming convention for cipher suites changed in TLS 1.3
+// but constant values should still be globally unique
+// Handling validation on a subset of TLSCipherSuite constants
+// would be a future exercise if cipher suites for TLS 1.3 ever
+// become configurable in BoringSSL, Envoy, or other implementations
+type TLSCipherSuite uint16
+
+const (
+	// Envoy cipher suites also used by Consul agent
+	TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLSCipherSuite = 0xc02b
+	TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xcca9 // Not used by Consul agent yet
+	TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xc02f
+	TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xcca8 // Not used by Consul agent yet
+	TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xc009
+	TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xc013
+	TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xc02c
+	TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xc030
+	TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xc00a
+	TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xc014
+
+	// Older cipher suites not supported for Consul agent TLS, will eventually be removed from Envoy defaults
+	TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009c
+	TLS_RSA_WITH_AES_128_CBC_SHA = 0x002f
+	TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009d
+	TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+
+	// Additional cipher suites used by Consul agent but not Envoy
+	// TODO: these are both explicitly listed as insecure and disabled in the Go source, should they be removed?
+	// https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/crypto/tls/cipher_suites.go;l=329-330
+	TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xc023
+	TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xc027
+)
+
+var (
+	TLSCipherSuites = map[string]TLSCipherSuite{
+		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+		"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+		"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+		"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+
+		"TLS_RSA_WITH_AES_128_GCM_SHA256": TLS_RSA_WITH_AES_128_GCM_SHA256,
+		"TLS_RSA_WITH_AES_128_CBC_SHA": TLS_RSA_WITH_AES_128_CBC_SHA,
+		"TLS_RSA_WITH_AES_256_GCM_SHA384": TLS_RSA_WITH_AES_256_GCM_SHA384,
+		"TLS_RSA_WITH_AES_256_CBC_SHA": TLS_RSA_WITH_AES_256_CBC_SHA,
+
+		"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+		"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+	}
+	HumanTLSCipherSuiteStrings = func() map[TLSCipherSuite]string {
+		inverted := make(map[TLSCipherSuite]string, len(TLSCipherSuites))
+		for k, v := range TLSCipherSuites {
+			inverted[v] = k
+		}
+		return inverted
+	}()
+	EnvoyTLSCipherSuiteStrings = map[TLSCipherSuite]string{
+		TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "ECDHE-ECDSA-AES128-GCM-SHA256",
+		TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: "ECDHE-ECDSA-CHACHA20-POLY1305",
+		TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "ECDHE-RSA-AES128-GCM-SHA256",
+		TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:
"ECDHE-RSA-CHACHA20-POLY1305", + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "ECDHE-ECDSA-AES128-SHA", + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "ECDHE-RSA-AES128-SHA", + TLS_RSA_WITH_AES_128_GCM_SHA256: "AES128-GCM-SHA256", + TLS_RSA_WITH_AES_128_CBC_SHA: "AES128-SHA", + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "ECDHE-ECDSA-AES256-GCM-SHA384", + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "ECDHE-RSA-AES256-GCM-SHA384", + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "ECDHE-ECDSA-AES256-SHA", + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "ECDHE-RSA-AES256-SHA", + TLS_RSA_WITH_AES_256_GCM_SHA384: "AES256-GCM-SHA384", + TLS_RSA_WITH_AES_256_CBC_SHA: "AES256-SHA", + } +) diff --git a/types/tls_test.go b/types/tls_test.go new file mode 100644 index 000000000..0cf94e42f --- /dev/null +++ b/types/tls_test.go @@ -0,0 +1,49 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTLSVersion_PartialEq(t *testing.T) { + require.Greater(t, TLSv1_3, TLSv1_2) + require.Greater(t, TLSv1_2, TLSv1_1) + require.Greater(t, TLSv1_1, TLSv1_0) + + require.Less(t, TLSv1_2, TLSv1_3) + require.Less(t, TLSv1_1, TLSv1_2) + require.Less(t, TLSv1_0, TLSv1_1) +} + +func TestTLSVersion_Invalid(t *testing.T) { + var zeroValue TLSVersion + require.NotEqual(t, TLSVersionInvalid, zeroValue) + require.NotEqual(t, TLSVersionInvalid, TLSVersionUnspecified) + require.NotEqual(t, TLSVersionInvalid, TLSVersionAuto) +} + +func TestTLSVersion_Zero(t *testing.T) { + var zeroValue TLSVersion + require.Equal(t, TLSVersionUnspecified, zeroValue) + require.NotEqual(t, TLSVersionUnspecified, TLSVersionInvalid) + require.NotEqual(t, TLSVersionUnspecified, TLSVersionAuto) +} + +func TestTLSVersion_ToJSON(t *testing.T) { + var tlsVersion TLSVersion + err := tlsVersion.UnmarshalJSON([]byte(`"foo"`)) + require.Error(t, err) + require.Equal(t, tlsVersion, TLSVersionInvalid) + + for str, version := range TLSVersions { + versionJSON, err := json.Marshal(version) + require.NoError(t, err) + require.Equal(t, versionJSON, []byte(`"`+str+`"`)) + + err = tlsVersion.UnmarshalJSON([]byte(`"` + str + `"`)) + require.NoError(t, err) + require.Equal(t, tlsVersion, version) + } +} From a2fd30e5146a35a7906854c1faf64f95c2a67a80 Mon Sep 17 00:00:00 2001 From: freddygv Date: Sat, 4 Dec 2021 15:16:15 -0700 Subject: [PATCH 32/60] Clean up additional refs to partition exports --- agent/consul/config_endpoint.go | 2 +- agent/consul/config_replication.go | 2 +- .../docs/connect/config-entries/exported-services.mdx | 8 ++++---- website/data/docs-nav-data.json | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index 97a2c72a7..0fb5a6ef8 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -594,7 +594,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r } func gateWriteToSecondary(targetDC, localDC, primaryDC, kind string) error { - // Partition exports are gated from interactions from secondary DCs + // ExportedServices entries are gated from interactions from secondary DCs // because non-default partitions cannot be created in secondaries // and services cannot be exported to another datacenter. 
if kind != structs.ExportedServices { diff --git a/agent/consul/config_replication.go b/agent/consul/config_replication.go index 8c9c1377b..243cd8bb3 100644 --- a/agent/consul/config_replication.go +++ b/agent/consul/config_replication.go @@ -92,7 +92,7 @@ func (s *Server) reconcileLocalConfig(ctx context.Context, configs []structs.Con defer ticker.Stop() for i, entry := range configs { - // Partition exports only apply to the primary datacenter. + // Exported services only apply to the primary datacenter. if entry.GetKind() == structs.ExportedServices { continue } diff --git a/website/content/docs/connect/config-entries/exported-services.mdx b/website/content/docs/connect/config-entries/exported-services.mdx index 2e7bed5c1..c4dc21213 100644 --- a/website/content/docs/connect/config-entries/exported-services.mdx +++ b/website/content/docs/connect/config-entries/exported-services.mdx @@ -1,12 +1,12 @@ --- layout: docs -page_title: 'Configuration Entry Kind: Partition Exports' +page_title: 'Configuration Entry Kind: Exported Services' description: >- The exported-services configuration entry enables you to export services from a single file. Settings in this configuration entry can apply to services in any namespace of the specified partition. Write access to the mesh resource is required. --- -# Partition Exports +# Exported Services This topic describes the `exported-services` configuration entry type. The `exported-services` configuration entry enables Consul to export service instances to other admin partitions from a single file. This enables your services to be networked across admin partitions. See [Admin Partitions](/docs/enterprise/admin-partitions) for additional information. @@ -21,7 +21,7 @@ You can configure the settings defined in the `exported-services` configuration ## Requirements * A Consul Enterprise binary -* A partition that corresponds to the configuration entry. As in, the partition exports config entry for partition "frontend" requires that the "frontend" partition exists +* A partition that corresponds to the configuration entry. 
As in, the exported services config entry for partition "frontend" requires that the "frontend" partition exists ## Usage @@ -36,7 +36,7 @@ You can configure the settings defined in the `exported-services` configuration Configure the following parameters to define a `exported-services` configuration entry: - + ```hcl diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 1d0a3fb6c..47f87740b 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -151,7 +151,7 @@ "path": "connect/config-entries/mesh" }, { - "title": "Partition Exports", + "title": "Exported Services", "path": "connect/config-entries/exported-services" }, { From ca7e087e571b0cad8e48295e7c3b62f907c325f8 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 10:09:44 +0000 Subject: [PATCH 33/60] ui: Add documentation link for partitions (#11668) --- .changelog/11668.txt | 3 +++ .../app/templates/dc/partitions/index.hbs | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 .changelog/11668.txt diff --git a/.changelog/11668.txt b/.changelog/11668.txt new file mode 100644 index 000000000..314bbfe57 --- /dev/null +++ b/.changelog/11668.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add documentation link to Partition empty state +``` diff --git a/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs b/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs index 363bf4a5b..53f96bb0d 100644 --- a/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs +++ b/ui/packages/consul-partitions/app/templates/dc/partitions/index.hbs @@ -121,10 +121,12 @@ as |route|> - From 77757739796889e5b89917a8928df45986dc1f3f Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 10:22:09 +0000 Subject: [PATCH 34/60] ui: Adds basic support for the Routing tab viz with partitions (#11679) --- .changelog/11679.txt | 3 + .../consul/discovery-chain/README.mdx | 18 +++++ .../consul/discovery-chain/index.js | 5 +- .../consul/discovery-chain/utils.js | 24 ++++-- .../consul-ui/mock-api/v1/discovery-chain/_ | 17 +++-- .../mock-api/v1/internal/ui/services | 3 + .../get-alternate-services-test.js | 16 ++-- .../discovery-chain/get-resolvers-test.js | 76 +++++++++++-------- 8 files changed, 107 insertions(+), 55 deletions(-) create mode 100644 .changelog/11679.txt create mode 100644 ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx diff --git a/.changelog/11679.txt b/.changelog/11679.txt new file mode 100644 index 000000000..daf39eb84 --- /dev/null +++ b/.changelog/11679.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds support for partitions to the Routing visualization. +``` diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx b/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx new file mode 100644 index 000000000..0a25754cb --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/README.mdx @@ -0,0 +1,18 @@ +--- +type: ember +state: needs-love +--- +# Consul::DiscoveryChain + +Mainly presentational component to visualize a discovery-chain. 
+ +```hbs preview-template + +{{#if source.data}} + +{{/if}} + +``` + diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js index 31842c7de..8b16e5b5c 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/index.js @@ -44,8 +44,8 @@ export default Component.extend({ !routes.find(item => typeof item.Definition === 'undefined') ) { let nextNode; - const resolverID = `resolver:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Datacenter}`; - const splitterID = `splitter:${this.chain.ServiceName}.${this.chain.Namespace}`; + const resolverID = `resolver:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Partition}.${this.chain.Datacenter}`; + const splitterID = `splitter:${this.chain.ServiceName}.${this.chain.Namespace}.${this.chain.Partition}`; // The default router should look for a splitter first, // if there isn't one try the default resolver if (typeof this.chain.Nodes[splitterID] !== 'undefined') { @@ -106,6 +106,7 @@ export default Component.extend({ resolvers: computed('chain.{Nodes,Targets}', function() { return getResolvers( this.chain.Datacenter, + this.chain.Partition, this.chain.Namespace, get(this, 'chain.Targets'), get(this, 'chain.Nodes') diff --git a/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js b/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js index 4dd42d1ff..e7698caa9 100644 --- a/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js +++ b/ui/packages/consul-ui/app/components/consul/discovery-chain/utils.js @@ -1,10 +1,10 @@ const getNodesByType = function(nodes = {}, type) { return Object.values(nodes).filter(item => item.Type === type); }; -const findResolver = function(resolvers, service, nspace = 'default', dc) { +const findResolver = function(resolvers, service, nspace = 'default', partition = 'default', dc) { if (typeof resolvers[service] === 'undefined') { resolvers[service] = { - ID: `${service}.${nspace}.${dc}`, + ID: `${service}.${nspace}.${partition}.${dc}`, Name: service, Children: [], }; @@ -19,7 +19,7 @@ export const getAlternateServices = function(targets, a) { // we might have more data from the endpoint so we don't have to guess // right now the backend also doesn't support dots in service names const [aRev, bRev] = [a, b].map(item => item.split('.').reverse()); - const types = ['Datacenter', 'Namespace', 'Service', 'Subset']; + const types = ['Datacenter', 'Partition', 'Namespace', 'Service', 'Subset']; return bRev.find(function(item, i) { const res = item !== aRev[i]; if (res) { @@ -61,7 +61,13 @@ export const getRoutes = function(nodes, uid) { ); }, []); }; -export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes = {}) { +export const getResolvers = function( + dc, + partition = 'default', + nspace = 'default', + targets = {}, + nodes = {} +) { const resolvers = {}; // make all our resolver nodes Object.values(nodes) @@ -70,7 +76,7 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes const parts = item.Name.split('.'); let subset; // this will leave behind the service.name.nspace.dc even if the service name contains a dot - if (parts.length > 3) { + if (parts.length > 4) { subset = parts.shift(); } parts.reverse(); @@ -79,10 +85,12 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes parts.shift(); 
// const nodeNspace = parts.shift(); + // const nodePartition = + parts.shift(); // if it does contain a dot put it back to the correct order parts.reverse(); const service = parts.join('.'); - const resolver = findResolver(resolvers, service, nspace, dc); + const resolver = findResolver(resolvers, service, nspace, partition, dc); let failovers; if (typeof item.Resolver.Failover !== 'undefined') { // figure out what type of failover this is @@ -108,12 +116,12 @@ export const getResolvers = function(dc, nspace = 'default', targets = {}, nodes // Failovers don't have a specific node if (typeof nodes[`resolver:${target.ID}`] !== 'undefined') { // We use this to figure out whether this target is a redirect target - const alternate = getAlternateServices([target.ID], `service.${nspace}.${dc}`); + const alternate = getAlternateServices([target.ID], `service.${nspace}.${partition}.${dc}`); // as Failovers don't make it here, we know anything that has alternateServices // must be a redirect if (alternate.Type !== 'Service') { // find the already created resolver - const resolver = findResolver(resolvers, target.Service, nspace, dc); + const resolver = findResolver(resolvers, target.Service, nspace, partition, dc); // and add the redirect as a child, redirects are always children const child = { Redirect: true, diff --git a/ui/packages/consul-ui/mock-api/v1/discovery-chain/_ b/ui/packages/consul-ui/mock-api/v1/discovery-chain/_ index f5f5d0109..fe51860d2 100644 --- a/ui/packages/consul-ui/mock-api/v1/discovery-chain/_ +++ b/ui/packages/consul-ui/mock-api/v1/discovery-chain/_ @@ -2,7 +2,9 @@ ${ [1].map(() => { const namespaces = ['default']; + const partitions = ['default']; const ns = location.search.ns || 'default'; + const partition = location.search.partition || 'default'; const dc = location.search.dc; const service = location.pathname.get(2); @@ -75,7 +77,7 @@ ${ const service = fake.hacker.noun().split(' ').join('-'); return { ServiceName: service, - Name: `${service}.${ns}.${dc}`, + Name: `${service}.${ns}.${partition}.${dc}`, Subsets: range( env( 'CONSUL_SUBSET_COUNT', @@ -96,7 +98,7 @@ ${ const service = resolvers[i].ServiceName; return { ServiceName: service, - Name: `${service}.${ns}.redirect-${dc}`, + Name: `${service}.${ns}.${partition}.redirect-${dc}`, Subsets: [] }; }); @@ -129,7 +131,7 @@ ${ const splitters = range( splitterCount ).map(() => ({ - Name: `${service}-${fake.hacker.noun()}.${ns}`, + Name: `${service}-${fake.hacker.noun()}.${ns}.${partition}`, Splits: range( splitCount ).map((item, i, arr) => ({ @@ -151,6 +153,7 @@ ${ "Chain": { "ServiceName": "${service}", "Namespace": "${ns}", + "Partition": "${partition}", "Datacenter": "${dc}", "Protocol": "http", "StartNode": "router:${service}", @@ -247,7 +250,8 @@ ${resolvers.map((resolver) => { ); const failover = ({ Datacenter: `${resolver.Name.replace(`.${dc}`, `.fail-${dc}`).replace(`.redirect-${dc}`, `.fail-${dc}`)}`, - Namespace: `${resolver.Name.replace(`.${ns}.`, `.fail-${ns}.`).replace(`.redirect-${ns}.`, `.fail-${ns}.`)}`, + Partition: `${resolver.Name.replace(`${ns}.${partition}.`, `${ns}.fail-${partition}.`).replace(`${ns}.redirect-${partition}.`, `${ns}.fail-${partition}.`)}`, + Namespace: `${resolver.Name.replace(`.${ns}.${partition}`, `.fail-${ns}.${partition}`).replace(`.redirect-${ns}.${partition}`, `.fail-${ns}.${partition}`)}`, })[env('CONSUL_FAILOVER_TYPE', 'Datacenter')]; return ` @@ -272,7 +276,8 @@ ${resolver.Subsets.map((subset) => { const id = `${subset}.${resolver.Name}`; const failover = ({ Datacenter: 
`${subset}.${resolver.Name.replace(`.${dc}`, `.fail-${dc}`)}`, - Namespace: `${subset}.${resolver.Name.replace(`.${ns}.`, `.fail-${ns}.`)}`, + Partition: `${subset}.${resolver.Name.replace(`${ns}.${partition}.`, `${ns}.fail-${partition}.`)}`, + Namespace: `${subset}.${resolver.Name.replace(`.${ns}.${partition}`, `.fail-${ns}.${partition}`)}`, })[env('CONSUL_FAILOVER_TYPE', 'Datacenter')]; return ` @@ -321,6 +326,7 @@ ${resolvers.map(item => { "ID": "${item.Name}", "Service": "${item.ServiceName}", "Namespace": "${ns}", + "Partition": "${partition}", "Datacenter": "${dc}", "MeshGateway": {}, "SNI": "${name}", @@ -335,6 +341,7 @@ ${item.Subsets.map(ktem => { "Service": "${item.ServiceName}", "ServiceSubset": "${ktem}", "Namespace": "${ns}", + "Partition": "${partition}", "Datacenter": "${dc}", "MeshGateway": { }, diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/services b/ui/packages/consul-ui/mock-api/v1/internal/ui/services index 66b7d9fd7..592e4f4f6 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/services +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/services @@ -45,6 +45,9 @@ ${[0].map( "Name":"${name}", ${typeof location.search.ns !== 'undefined' ? ` "Namespace": "${location.search.ns}", +` : ``} +${typeof location.search.partition !== 'undefined' ? ` + "Partition": "${location.search.ns}", ` : ``} "Tags": [ ${ diff --git a/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-alternate-services-test.js b/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-alternate-services-test.js index 53e986cd3..f33af1b89 100644 --- a/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-alternate-services-test.js +++ b/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-alternate-services-test.js @@ -8,8 +8,8 @@ module('Unit | Component | consul/discovery-chain/get-alternative-services', fun Targets: ['different-ns', 'different-ns2'], }; const actual = getAlternateServices( - ['service.different-ns.dc', 'service.different-ns2.dc'], - 'service.namespace.dc' + ['service.different-ns.partition.dc', 'service.different-ns2.partition.dc'], + 'service.namespace.partition.dc' ); assert.equal(actual.Type, expected.Type); assert.deepEqual(actual.Targets, expected.Targets); @@ -20,8 +20,8 @@ module('Unit | Component | consul/discovery-chain/get-alternative-services', fun Targets: ['dc1', 'dc2'], }; const actual = getAlternateServices( - ['service.namespace.dc1', 'service.namespace.dc2'], - 'service.namespace.dc' + ['service.namespace.partition.dc1', 'service.namespace.partition.dc2'], + 'service.namespace.partition.dc' ); assert.equal(actual.Type, expected.Type); assert.deepEqual(actual.Targets, expected.Targets); @@ -32,8 +32,8 @@ module('Unit | Component | consul/discovery-chain/get-alternative-services', fun Targets: ['service-2', 'service-3'], }; const actual = getAlternateServices( - ['service-2.namespace.dc', 'service-3.namespace.dc'], - 'service.namespace.dc' + ['service-2.namespace.partition.dc', 'service-3.namespace.partition.dc'], + 'service.namespace.partition.dc' ); assert.equal(actual.Type, expected.Type); assert.deepEqual(actual.Targets, expected.Targets); @@ -44,8 +44,8 @@ module('Unit | Component | consul/discovery-chain/get-alternative-services', fun Targets: ['v3', 'v2'], }; const actual = getAlternateServices( - ['v3.service.namespace.dc', 'v2.service.namespace.dc'], - 'v1.service.namespace.dc' + ['v3.service.namespace.partition.dc', 'v2.service.namespace.partition.dc'], + 
'v1.service.namespace.partition.dc' ); assert.equal(actual.Type, expected.Type); assert.deepEqual(actual.Targets, expected.Targets); diff --git a/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-resolvers-test.js b/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-resolvers-test.js index 69dc9634b..e29898409 100644 --- a/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-resolvers-test.js +++ b/ui/packages/consul-ui/tests/unit/components/consul/discovery-chain/get-resolvers-test.js @@ -4,6 +4,7 @@ import { get } from 'consul-ui/tests/helpers/api'; const dc = 'dc-1'; const nspace = 'default'; +const partition = 'default'; const request = { url: `/v1/discovery-chain/service-name?dc=${dc}`, }; @@ -19,7 +20,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const childId = Object.keys(Chain.Targets)[1]; const target = Chain.Targets[`${childId}`]; const firstChild = actual[0].Children[0]; @@ -39,7 +40,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const childId = Object.keys(Chain.Targets)[1]; const target = Chain.Targets[`${childId}`]; const firstChild = actual[0].Children[0]; @@ -61,7 +62,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const actualSubset = actual[0].Children[0]; assert.equal(actualSubset.Subset, true); assert.equal(actualSubset.Failover.Type, failoverType); @@ -71,7 +72,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }); test('it assigns Failovers correctly', function(assert) { return Promise.all( - ['Datacenter', 'Namespace'].map(function(failoverType, i) { + ['Datacenter', 'Partition', 'Namespace'].map(function(failoverType, i) { return get(request.url, { headers: { cookie: { @@ -83,7 +84,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const node = Chain.Nodes[`resolver:${Object.keys(Chain.Targets)[0]}`]; const expected = node.Resolver.Failover.Targets.map(item => item.split('.').reverse()[i]); assert.equal(actual[0].Failover.Type, failoverType); @@ -101,31 +102,36 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { Protocol: 'http', StartNode: '', Nodes: { - 'resolver:v2.dc-failover.default.dc-1': { + 'resolver:v2.dc-failover.default.default.dc-1': { Type: 'resolver', - Name: 'v2.dc-failover.default.dc-1', + Name: 'v2.dc-failover.default.default.dc-1', Resolver: { - Target: 'v2.dc-failover.default.dc-1', + Target: 'v2.dc-failover.default.default.dc-1', Failover: { - Targets: ['v2.dc-failover.default.dc-5', 'v2.dc-failover.default.dc-6'], + Targets: [ + 'v2.dc-failover.default.default.dc-5', + 'v2.dc-failover.default.default.dc-6', + ], }, }, }, }, Targets: { - 
'v2.dc-failover.default.dc-1': { - ID: 'v2.dc-failover.default.dc-1', + 'v2.dc-failover.default.default.dc-1': { + ID: 'v2.dc-failover.default.default.dc-1', Service: 'dc-failover', Namespace: 'default', + Partition: 'default', Datacenter: 'dc-1', Subset: { Filter: '', }, }, - 'v2.dc-failover.default.dc-6': { - ID: 'v2.dc-failover.default.dc-6', + 'v2.dc-failover.default.default.dc-6': { + ID: 'v2.dc-failover.default.default.dc-6', Service: 'dc-failover', Namespace: 'default', + Partition: 'default', Datacenter: 'dc-6', Subset: { Filter: '', @@ -134,14 +140,14 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const expected = { - ID: 'dc-failover.default.dc-1', + ID: 'dc-failover.default.default.dc-1', Name: 'dc-failover', Children: [ { Subset: true, - ID: 'v2.dc-failover.default.dc-1', + ID: 'v2.dc-failover.default.default.dc-1', Name: 'v2', Failover: { Type: 'Datacenter', @@ -162,30 +168,31 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { Protocol: 'http', StartNode: '', Nodes: { - 'resolver:dc-failover.default.dc-1': { + 'resolver:dc-failover.default.default.dc-1': { Type: 'resolver', - Name: 'dc-failover.default.dc-1', + Name: 'dc-failover.default.default.dc-1', Resolver: { - Target: 'dc-failover.default.dc-1', + Target: 'dc-failover.default.default.dc-1', Failover: { - Targets: ['dc-failover.default.dc-5', 'dc-failover.default.dc-6'], + Targets: ['dc-failover.default.default.dc-5', 'dc-failover.default.default.dc-6'], }, }, }, }, Targets: { - 'dc-failover.default.dc-1': { - ID: 'dc-failover.default.dc-1', + 'dc-failover.default.default.dc-1': { + ID: 'dc-failover.default.default.dc-1', Service: 'dc-failover', Namespace: 'default', + Partition: 'default', Datacenter: 'dc-1', }, }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); const expected = { - ID: 'dc-failover.default.dc-1', + ID: 'dc-failover.default.default.dc-1', Name: 'dc-failover', Children: [], Failover: { @@ -201,37 +208,42 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { Chain: { ServiceName: 'service-name', Namespace: 'default', + Partition: 'default', Datacenter: 'dc-1', Protocol: 'http', StartNode: '', Nodes: { - 'resolver:dc-failover.default.redirect-dc-1': { + 'resolver:dc-failover.default.default.redirect-dc-1': { Type: 'resolver', - Name: 'dc-failover.default.redirect-dc-1', + Name: 'dc-failover.default.default.redirect-dc-1', Resolver: { - Target: 'dc-failover.default.redirect-dc-1', + Target: 'dc-failover.default.default.redirect-dc-1', Failover: { - Targets: ['dc-failover.default.redirect-dc-5', 'dc-failover.default.redirect-dc-6'], + Targets: [ + 'dc-failover.default.default.redirect-dc-5', + 'dc-failover.default.default.redirect-dc-6', + ], }, }, }, }, Targets: { 'dc-failover.default.redirect-dc-1': { - ID: 'dc-failover.default.redirect-dc-1', + ID: 'dc-failover.default.default.redirect-dc-1', Service: 'dc-failover', Namespace: 'default', + Partition: 'default', Datacenter: 'redirect-dc-1', }, }, }, }).then(function({ Chain }) { - const actual = getResolvers(dc, nspace, Chain.Targets, Chain.Nodes); + const actual = getResolvers(dc, partition, nspace, Chain.Targets, Chain.Nodes); // Both the 
parent and the child should have a Failover property // as in order for a redirect to have failovers it must redirect to a // service that already has failovers const expected = { - ID: 'dc-failover.default.dc-1', + ID: 'dc-failover.default.default.dc-1', Name: 'dc-failover', Failover: { Targets: ['redirect-dc-5', 'redirect-dc-6'], @@ -243,7 +255,7 @@ module('Unit | Component | consul/discovery-chain/get-resolvers', function() { Targets: ['redirect-dc-5', 'redirect-dc-6'], Type: 'Datacenter', }, - ID: 'dc-failover.default.redirect-dc-1', + ID: 'dc-failover.default.default.redirect-dc-1', Name: 'redirect-dc-1', Redirect: true, }, From a90a65c9d8d78190405774d2f06edd3c4fbb1e83 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 10:33:44 +0000 Subject: [PATCH 35/60] ui: Adds partition support to Service and Node Identity templates (#11696) --- .changelog/11696.txt | 3 ++ .../app/components/code-editor/README.mdx | 6 +-- .../app/components/code-editor/index.hbs | 13 ++++++- .../consul/node-identity/template/README.mdx | 27 +++++++++++++ .../consul/node-identity/template/index.hbs | 28 +++++++++++++- .../service-identity/template/README.mdx | 29 ++++++++++++++ .../service-identity/template/index.hbs | 38 ++++++++++++++++++- .../app/components/inline-code/README.mdx | 16 ++++++++ .../app/components/inline-code/layout.scss | 4 ++ .../app/components/inline-code/skin.scss | 1 + .../app/components/policy-form/index.hbs | 14 +++---- .../app/components/policy-selector/index.hbs | 2 + ui/packages/consul-ui/app/styles/debug.scss | 19 ++++++---- 13 files changed, 173 insertions(+), 27 deletions(-) create mode 100644 .changelog/11696.txt create mode 100644 ui/packages/consul-ui/app/components/consul/node-identity/template/README.mdx create mode 100644 ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx diff --git a/.changelog/11696.txt b/.changelog/11696.txt new file mode 100644 index 000000000..5723a5a74 --- /dev/null +++ b/.changelog/11696.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds support for partitions to Service and Node Identity template visuals. +``` diff --git a/ui/packages/consul-ui/app/components/code-editor/README.mdx b/ui/packages/consul-ui/app/components/code-editor/README.mdx index cdf13110f..6e38d2d45 100644 --- a/ui/packages/consul-ui/app/components/code-editor/README.mdx +++ b/ui/packages/consul-ui/app/components/code-editor/README.mdx @@ -16,11 +16,7 @@ state: needs-love Rules (HCL Format) <:content> - - + {"content": "Initial Content"} ``` diff --git a/ui/packages/consul-ui/app/components/code-editor/index.hbs b/ui/packages/consul-ui/app/components/code-editor/index.hbs index d994307bd..f5594c3d4 100644 --- a/ui/packages/consul-ui/app/components/code-editor/index.hbs +++ b/ui/packages/consul-ui/app/components/code-editor/index.hbs @@ -18,11 +18,20 @@ {{mode.name}}
- + {{/if}} {{/if}} - +
{{#if (has-block "content")}}{{yield to="content"}}{{else}}{{value}}{{/if}}
diff --git a/ui/packages/consul-ui/app/components/consul/node-identity/template/README.mdx b/ui/packages/consul-ui/app/components/consul/node-identity/template/README.mdx new file mode 100644 index 000000000..7c0ab1c86 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/node-identity/template/README.mdx @@ -0,0 +1,27 @@ +# Consul::Node::Identity::Template + +The component is a text-only template that represents what a NodeIdentity +policy looks like. The policy generated here is **not** what is sent back to +the backend, instead its just a visual representation of what happens in the +backend when you save a NodeIdentity. + +```hbs preview-template +
+``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `partition` | `string` | `default` | The name of the current partition | +| `name` | `string` | | The name of the policy the will be used to +interpolate the various policy names | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs b/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs index 54b3f2948..41dbefe82 100644 --- a/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/node-identity/template/index.hbs @@ -1,4 +1,27 @@ -{{#if (env "CONSUL_NSPACES_ENABLED")}} +{{#if (can "use partitions")~}} +partition "{{or @partition 'default'}}" { + {{#if (can "use nspaces")}} + namespace "default" { + node "{{@name}}" { + policy = "write" + } + } + namespace_prefix "" { + service_prefix "" { + policy = "read" + } + } + {{else}} + node "{{@name}}" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + {{/if}} +} +{{~else~}} +{{~#if (can "use nspaces")~}} namespace "default" { node "{{@name}}" { policy = "write" @@ -16,4 +39,5 @@ node "{{@name}}" { service_prefix "" { policy = "read" } -{{/if}} \ No newline at end of file +{{~/if~}} +{{~/if~}} \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx b/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx new file mode 100644 index 000000000..89eb4cb25 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/service-identity/template/README.mdx @@ -0,0 +1,29 @@ +# Consul::ServiceIdentity::Template + +The component is a text-only template that represents what a NodeIdentity +policy looks like. The policy generated here is **not** what is sent back to +the backend, instead its just a visual representation of what happens in the +backend when you save a NodeIdentity. + +```hbs preview-template +
+``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `nspace` | `string` | `default` | The name of the current namespace | +| `partition` | `string` | `default` | The name of the current partition | +| `name` | `string` | | The name of the policy the will be used to +interpolate the various policy names | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs b/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs index 3a1553305..2c1de515c 100644 --- a/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service-identity/template/index.hbs @@ -1,5 +1,38 @@ -{{#if (env "CONSUL_NSPACES_ENABLED")}} -namespace "{{@nspace}}" { +{{#if (can "use partitions")}} +partition "{{or @partition 'default'}}" { + {{#if (can 'use nspaces')}} + namespace "{{or @nspace 'default'}}" { + service "{{@name}}" { + policy = "write" + } + service "{{@name}}-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + } + {{else}} + service "{{@name}}" { + policy = "write" + } + service "{{@name}}-sidecar-proxy" { + policy = "write" + } + service_prefix "" { + policy = "read" + } + node_prefix "" { + policy = "read" + } + {{/if}} +} +{{else}} +{{#if (can 'use nspaces')}} +namespace "{{or @nspace 'default'}}" { service "{{@name}}" { policy = "write" } @@ -26,4 +59,5 @@ service_prefix "" { node_prefix "" { policy = "read" } +{{/if}} {{/if}} \ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/inline-code/README.mdx b/ui/packages/consul-ui/app/components/inline-code/README.mdx index 20761809e..afdd5cbdb 100644 --- a/ui/packages/consul-ui/app/components/inline-code/README.mdx +++ b/ui/packages/consul-ui/app/components/inline-code/README.mdx @@ -20,3 +20,19 @@ p code { @extend %inline-code; } ``` + +We also have a `%block-code` that is currently contained in the same CSS files here as it shares so much of this (this should potentially be thought about and reorganized at some stage) + +At the time of writing we only use this in the docs for all `
`s.
+
+```hbs preview-template
+
{
+  "Code": true
+}
+``` + +```css +pre code { + @extend %block-code; +} +``` diff --git a/ui/packages/consul-ui/app/components/inline-code/layout.scss b/ui/packages/consul-ui/app/components/inline-code/layout.scss index e23fbfe00..499d70d31 100644 --- a/ui/packages/consul-ui/app/components/inline-code/layout.scss +++ b/ui/packages/consul-ui/app/components/inline-code/layout.scss @@ -2,3 +2,7 @@ display: inline-block; padding: 0 4px; } +%block-code { + display: block; + padding: 0 8px; +} diff --git a/ui/packages/consul-ui/app/components/inline-code/skin.scss b/ui/packages/consul-ui/app/components/inline-code/skin.scss index 133d1ede3..3fb061925 100644 --- a/ui/packages/consul-ui/app/components/inline-code/skin.scss +++ b/ui/packages/consul-ui/app/components/inline-code/skin.scss @@ -1,3 +1,4 @@ +%block-code, %inline-code { border: 1px solid; color: rgb(var(--tone-brand-600)); diff --git a/ui/packages/consul-ui/app/components/policy-form/index.hbs b/ui/packages/consul-ui/app/components/policy-form/index.hbs index 61e2fa908..da1f05479 100644 --- a/ui/packages/consul-ui/app/components/policy-form/index.hbs +++ b/ui/packages/consul-ui/app/components/policy-form/index.hbs @@ -47,12 +47,11 @@ <:label> Rules (HCL Format) - <:content> - - + /> {{else if (eq item.template 'node-identity')}} Rules (HCL Format) - <:content> - - + @partition={{partition}} + /> {{else}} @@ -140,6 +141,7 @@ <:content> diff --git a/ui/packages/consul-ui/app/styles/debug.scss b/ui/packages/consul-ui/app/styles/debug.scss index 878aac643..5ca08667b 100644 --- a/ui/packages/consul-ui/app/styles/debug.scss +++ b/ui/packages/consul-ui/app/styles/debug.scss @@ -31,24 +31,24 @@ @extend %p3; text-align: center; } -[id^="docfy-demo-preview-color"] ul, -[id^="docfy-demo-preview-typography"] ul, -[id^="docfy-demo-preview-icons"] ul { +[id^='docfy-demo-preview-color'] ul, +[id^='docfy-demo-preview-typography'] ul, +[id^='docfy-demo-preview-icons'] ul { @extend %debug-grid; } -[id^="docfy-demo-preview-typography"] figure, -[id^="docfy-demo-preview-icons"] figure { +[id^='docfy-demo-preview-typography'] figure, +[id^='docfy-demo-preview-icons'] figure { border: var(--decor-border-100); border-color: rgb(var(--tone-gray-300)); height: 80px; } -[id^="docfy-demo-preview-icons"] figure::before { +[id^='docfy-demo-preview-icons'] figure::before { position: relative; top: 20px; } -[id^="docfy-demo-preview-color"] figure { +[id^='docfy-demo-preview-color'] figure { height: 40px; } #docfy-demo-preview-color0 { @@ -60,7 +60,7 @@ @extend %theme-dark; } -[id^="docfy-demo-preview-typography"] { +[id^='docfy-demo-preview-typography'] { .debug-h000 { @extend %h000; } @@ -248,6 +248,9 @@ html.with-route-announcer .route-title { figcaption code { @extend %inline-code; } + pre code { + @extend %block-code; + } figure > [type='text'] { border: 1px solid rgb(var(--tone-gray-999)); width: 100%; From 442df6d27d736e53c6dc00f68f8418727c767d93 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 11:06:33 +0000 Subject: [PATCH 36/60] ui: Adds basic support for partition exports to Service listings (#11702) Also: * ui: Add cross partition linking and rollout BucketList (#11712) * ui: Add exported service partition to the source filter menu (#11727) --- .changelog/11702.txt | 3 + .../components/consul/bucket/list/README.mdx | 44 ++++++++++++ .../components/consul/bucket/list/index.hbs | 39 +++++++++++ .../components/consul/bucket/list/index.scss | 26 +++++++ .../components/consul/service/list/README.mdx | 28 ++++++++ .../components/consul/service/list/index.hbs | 32 
+++++---- .../consul/service/search-bar/index.hbs | 22 ++++++ .../consul/upstream-instance/list/index.hbs | 68 ++++++++----------- .../components/consul/upstream/list/index.hbs | 43 ++++-------- .../app/components/popover-select/index.scss | 9 ++- .../app/filter/predicates/service.js | 5 +- ui/packages/consul-ui/app/models/service.js | 10 ++- .../consul-ui/app/styles/components.scss | 1 + .../app/templates/dc/services/index.hbs | 30 ++++++-- .../dc/services/instance/upstreams.hbs | 6 +- .../templates/dc/services/show/services.hbs | 3 +- .../templates/dc/services/show/upstreams.hbs | 6 +- .../consul-ui/mock-api/v1/health/service/_ | 3 +- .../v1/internal/ui/gateway-services-nodes/_ | 3 + .../mock-api/v1/internal/ui/services | 2 +- .../consul-ui/translations/common/en-us.yaml | 3 +- 21 files changed, 286 insertions(+), 100 deletions(-) create mode 100644 .changelog/11702.txt create mode 100644 ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx create mode 100644 ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs create mode 100644 ui/packages/consul-ui/app/components/consul/bucket/list/index.scss create mode 100644 ui/packages/consul-ui/app/components/consul/service/list/README.mdx diff --git a/.changelog/11702.txt b/.changelog/11702.txt new file mode 100644 index 000000000..bd3b4f239 --- /dev/null +++ b/.changelog/11702.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Adds basic support for showing Services exported from another partition. +``` diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx new file mode 100644 index 000000000..939b220e8 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/README.mdx @@ -0,0 +1,44 @@ +# Consul::Bucket::List + +A presentational component for rendering a list of Consul 'buckets' +(a single partition and/or a single namepace). + +Please note this is not your usual "scrollable list component" more a list of +'buckets' that make up a partition / namespace combination. + +If only a the namespace is different to the currently selected namespace, then +the namespace will be displayed, whereas if the partition is different it will +show both the partition and namespace (as a namespace called 'team-1' in +`partition-1` is different to a namespace called 'team-1' in `partition-2`) + +If you don't need the nspace only support for the view you are building then +omit the `@nspace` argument. + +At the time of writing, this is not currently used across the entire UI +(specifically in intentions and maybe other areas) but eventually should be. + + +```hbs preview-template + + + +``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `item` | `array` | | A Consul object that could have both a `Partition` and a `Namespace` property | +| `nspace` | `string` | | The name of the current namespace | +| `partition` | `string` | | The name of the current partition | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs b/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs new file mode 100644 index 000000000..d9520749b --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.hbs @@ -0,0 +1,39 @@ +{{#if (and @partition (can 'use partitions'))}} + {{#if (not-eq @item.Partition @partition)}} +
+
+ Admin Partition +
+
+ {{@item.Partition}} +
+
+ Namespace +
+
+ {{@item.Namespace}} +
+
+ {{/if}} +{{else if (and @nspace (can 'use nspace'))}} + {{#if (not-eq @item.Namespace @nspace)}} +
+
+ Namespace +
+
+ {{@item.Namespace}} +
+
+ {{/if}} +{{/if}} + diff --git a/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss b/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss new file mode 100644 index 000000000..826ef4841 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/bucket/list/index.scss @@ -0,0 +1,26 @@ +%consul-bucket-list { + & { + @extend %horizontal-kv-list; + } + .partition::before { + @extend %with-user-team-mask, %as-pseudo; + } + .nspace::before { + @extend %with-folder-outline-mask, %as-pseudo; + } + /* potential for some sort of %composite-kv thing here */ + .partition + dd::after { + display: inline-block; + content: '/'; + margin: 0 3px; + /*TODO: In isolation this is not needed */ + margin-right: 6px; + } + .partition + dd + .nspace { + margin-left: 0 !important; + } + /**/ +} +.consul-bucket-list { + @extend %consul-bucket-list; +} diff --git a/ui/packages/consul-ui/app/components/consul/service/list/README.mdx b/ui/packages/consul-ui/app/components/consul/service/list/README.mdx new file mode 100644 index 000000000..3ffb50404 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/service/list/README.mdx @@ -0,0 +1,28 @@ +# Consul::Service::List + +A presentational component for rendering a list of Consul Services. + +```hbs preview-template + + + +``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `items` | `array` | | An array of Consul Services | +| `nspace` | `string` | | The name of the current namespace | +| `partition` | `string` | | The name of the current partition | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/consul/service/list/index.hbs b/ui/packages/consul-ui/app/components/consul/service/list/index.hbs index fd5eaa568..f7643af0f 100644 --- a/ui/packages/consul-ui/app/components/consul/service/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service/list/index.hbs @@ -25,7 +25,18 @@ {{#if (gt item.InstanceCount 0)}} - + {{item.Name}} {{else}} @@ -35,20 +46,6 @@ {{/if}} -{{#if (and nspace (env 'CONSUL_NSPACES_ENABLED'))}} - {{#if (not-eq item.Namespace nspace)}} -
-
- - Namespace - -
-
- {{item.Namespace}} -
-
- {{/if}} -{{/if}} {{#if (and (not-eq item.InstanceCount 0) (and (not-eq item.Kind 'terminating-gateway') (not-eq item.Kind 'ingress-gateway'))) }} @@ -87,6 +84,11 @@ {{/if}} {{/if}} +
\ No newline at end of file diff --git a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs index 4c487e9f3..3b300dbbc 100644 --- a/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/service/search-bar/index.hbs @@ -140,11 +140,33 @@ as |key value|}} {{#let components.Optgroup components.Option as |Optgroup Option|}} +{{#let + (reject-by 'Partition' @partition @partitions) +as |nonDefaultPartitions|}} +{{#if (gt nonDefaultPartitions.length 0)}} + + {{#each @partitions as |partition|}} + + {{/each}} + +{{/if}} +{{/let}} + +{{#if (gt @sources.length 0)}} + {{#each @sources as |source|}} {{/each}} + +{{/if}} {{/let}} diff --git a/ui/packages/consul-ui/app/components/consul/upstream-instance/list/index.hbs b/ui/packages/consul-ui/app/components/consul/upstream-instance/list/index.hbs index 7c605bb1b..1894bba9e 100644 --- a/ui/packages/consul-ui/app/components/consul/upstream-instance/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/upstream-instance/list/index.hbs @@ -5,40 +5,26 @@
    {{#each @items as |item|}}
  • +

    {{item.DestinationName}}

    +
    -{{#if (can 'use partitions')}} - {{#if (not-eq item.DestinationType 'prepared_query')}} -
    -
    - Admin Partition -
    -
    - {{or item.DestinationPartition 'default'}} -
    -
    - {{/if}} -{{/if}} -{{#if (can 'use nspaces')}} - {{#if (not-eq item.DestinationType 'prepared_query')}} -
    -
    - Namespace -
    -
    - {{or item.DestinationNamespace 'default'}} -
    -
    - {{/if}} + +{{#if (not-eq item.DestinationType 'prepared_query')}} + {{/if}} + {{#if (and (not-eq item.Datacenter @dc) (not-eq item.Datacenter ""))}}
    {{/if}} + {{#if item.LocalBindSocketPath}}
    @@ -73,23 +60,26 @@
    {{else}} + {{#if (gt item.LocalBindPort 0)}} {{#let (concat (or item.LocalBindAddress '127.0.0.1') ':' item.LocalBindPort) as |combinedAddress|}} -
    -
    - Address -
    -
    - - {{combinedAddress}} -
    -
    +
    +
    + Address +
    +
    + + {{combinedAddress}} +
    +
    {{/let}} {{/if}} + {{/if}} +
  • {{/each}} diff --git a/ui/packages/consul-ui/app/components/consul/upstream/list/index.hbs b/ui/packages/consul-ui/app/components/consul/upstream/list/index.hbs index 66324a9be..caa2c5ac0 100644 --- a/ui/packages/consul-ui/app/components/consul/upstream/list/index.hbs +++ b/ui/packages/consul-ui/app/components/consul/upstream/list/index.hbs @@ -24,27 +24,21 @@ as |item index|> {{/if}} -{{#if (and (can 'use partitions') (not-eq item.Partition @partition))}} -
    -
    - Admin Partition -
    -
    - {{item.Partition}} -
    -
    -{{/if}} {{item.Name}} @@ -56,18 +50,11 @@ as |item index|> {{/if}} -{{#if (and (can 'use nspaces') (not-eq item.Namespace @nspace))}} -
    -
    - Namespace -
    -
    - {{item.Namespace}} -
    -
    -{{/if}} + {{#each item.GatewayConfig.Addresses as |address|}}
    diff --git a/ui/packages/consul-ui/app/components/popover-select/index.scss b/ui/packages/consul-ui/app/components/popover-select/index.scss index 71de6fd10..235fc643d 100644 --- a/ui/packages/consul-ui/app/components/popover-select/index.scss +++ b/ui/packages/consul-ui/app/components/popover-select/index.scss @@ -21,6 +21,8 @@ margin-right: 10px; } +/* TODO: Consider moving these to their specific search bard componets or */ +/* even their own search bar sub menu components */ %popover-select .value-passing button::before { @extend %with-check-circle-fill-mask, %as-pseudo; color: rgb(var(--tone-green-500)); @@ -37,12 +39,16 @@ @extend %with-minus-square-fill-mask, %as-pseudo; color: rgb(var(--tone-gray-400)); } -%popover-select.type-source li button { +%popover-select.type-source li:not(.partition) button { text-transform: capitalize; } %popover-select.type-source li.aws button { text-transform: uppercase; } +%popover-select.type-source li.partition button::before { + @extend %with-user-team-mask, %as-pseudo; + color: rgb(var(--tone-gray-500)); +} %popover-select .aws button::before { @extend %with-logo-aws-color-icon, %as-pseudo; } @@ -68,3 +74,4 @@ %popover-select .terraform button::before { @extend %with-logo-terraform-color-icon, %as-pseudo; } +/**/ diff --git a/ui/packages/consul-ui/app/filter/predicates/service.js b/ui/packages/consul-ui/app/filter/predicates/service.js index f9a7d4a1f..5ca36496a 100644 --- a/ui/packages/consul-ui/app/filter/predicates/service.js +++ b/ui/packages/consul-ui/app/filter/predicates/service.js @@ -20,6 +20,9 @@ export default { 'not-registered': (item, value) => item.InstanceCount === 0, }, source: (item, values) => { - return setHelpers.intersectionSize(values, new Set(item.ExternalSources || [])) !== 0; + return ( + setHelpers.intersectionSize(values, new Set(item.ExternalSources || [])) !== 0 || + values.includes(item.Partition) + ); }, }; diff --git a/ui/packages/consul-ui/app/models/service.js b/ui/packages/consul-ui/app/models/service.js index 1e470ab5c..31f0e2fc0 100644 --- a/ui/packages/consul-ui/app/models/service.js +++ b/ui/packages/consul-ui/app/models/service.js @@ -15,11 +15,17 @@ export const Collection = class Collection { } get ExternalSources() { - const sources = this.items.reduce(function(prev, item) { + const items = this.items.reduce(function(prev, item) { return prev.concat(item.ExternalSources || []); }, []); // unique, non-empty values, alpha sort - return [...new Set(sources)].filter(Boolean).sort(); + return [...new Set(items)].filter(Boolean).sort(); + } + // TODO: Think about when this/collections is worthwhile using and explain + // when and when not somewhere in the docs + get Partitions() { + // unique, non-empty values, alpha sort + return [...new Set(this.items.map(item => item.Partition))].sort(); } }; export default class Service extends Model { diff --git a/ui/packages/consul-ui/app/styles/components.scss b/ui/packages/consul-ui/app/styles/components.scss index a9b041af8..197e90963 100644 --- a/ui/packages/consul-ui/app/styles/components.scss +++ b/ui/packages/consul-ui/app/styles/components.scss @@ -75,6 +75,7 @@ @import 'consul-ui/components/consul/loader'; @import 'consul-ui/components/consul/tomography/graph'; @import 'consul-ui/components/consul/discovery-chain'; +@import 'consul-ui/components/consul/bucket/list'; @import 'consul-ui/components/consul/upstream/list'; @import 'consul-ui/components/consul/upstream-instance/list'; @import 'consul-ui/components/consul/health-check/list'; diff --git 
a/ui/packages/consul-ui/app/templates/dc/services/index.hbs b/ui/packages/consul-ui/app/templates/dc/services/index.hbs index b35b76fe0..27342bf3c 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/index.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/index.hbs @@ -53,7 +53,10 @@ as |route|> (reject-by 'Kind' 'connect-proxy' api.data) -as |sort filters items|}} + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') + +as |sort filters items partition nspace|}} @@ -63,9 +66,12 @@ as |sort filters items|}} - {{#if (gt items.length 0) }} +{{#if (gt items.length 0) }} + {{#let (collection items) as |items|}} - {{/if}} + {{/let}} +{{/if}} @@ -115,10 +123,20 @@ as |sort filters items|}} diff --git a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs index 41835a81b..526073d51 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/instance/upstreams.hbs @@ -20,12 +20,14 @@ as |route|> ) ) + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') route.params.dc - route.params.nspace + route.model.proxy route.model.proxy.Service.Proxy.Upstreams - as |sort filters dc nspace proxy items|}} + as |sort filters partition nspace dc proxy items|}} {{#if (gt items.length 0)}} as |collection|> diff --git a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs index 116e00bb3..72d86d0f7 100644 --- a/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs +++ b/ui/packages/consul-ui/app/templates/dc/services/show/upstreams.hbs @@ -42,11 +42,12 @@ as |route|> ) ) - route.params.nspace + (or route.params.partition route.model.user.token.Partition 'default') + (or route.params.nspace route.model.user.token.Namespace 'default') route.params.dc loader.data - as |sort filters nspace dc items|}} + as |sort filters partition nspace dc items|}} {{#if (gt items.length 0)}} @items={{collection.items}} @dc={{dc}} @nspace={{nspace}} + @partition={{partition}} > diff --git a/ui/packages/consul-ui/mock-api/v1/health/service/_ b/ui/packages/consul-ui/mock-api/v1/health/service/_ index 555d46f5e..3a33f731e 100644 --- a/ui/packages/consul-ui/mock-api/v1/health/service/_ +++ b/ui/packages/consul-ui/mock-api/v1/health/service/_ @@ -53,7 +53,7 @@ ${typeof location.search.ns !== 'undefined' ? ` "Namespace": "${location.search.ns}", ` : ``} ${typeof location.search.partition !== 'undefined' ? ` - "Partition": "${location.search.partition}", + "Partition": "${fake.helpers.randomize([env('CONSUL_PARTITION_EXPORTER', location.search.partition), location.search.partition])}", ` : ``} "Tags":[ ${ @@ -132,6 +132,7 @@ ${range(env('CONSUL_UPSTREAM_COUNT', 10)).map((item, j) => ` "Datacenter": "${fake.address.countryCode().toLowerCase()} ${ i % 2 ? "west" : "east"}-${j}", "DestinationName": "${fake.hacker.noun()}", "DestinationNamespace": "${fake.hacker.noun()}", + "DestinationPartition": "${fake.hacker.noun()}", "DestinationType": "${fake.helpers.randomize(['service', 'prepared_query'])}", ${fake.random.number({min: 1, max: 10}) > 5 ? 
` "LocalBindAddress": "${fake.internet.ip()}", diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/gateway-services-nodes/_ b/ui/packages/consul-ui/mock-api/v1/internal/ui/gateway-services-nodes/_ index ef0f45ffa..3bd119146 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/gateway-services-nodes/_ +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/gateway-services-nodes/_ @@ -17,6 +17,9 @@ ${i === 1 ? ` ` : ` "Namespace": "${fake.hacker.noun()}-ns-${i}", `} +${typeof location.search.partition !== 'undefined' ? ` + "Partition": "${fake.helpers.randomize([env('CONSUL_PARTITION_EXPORTER', location.search.partition), location.search.partition])}", +` : ``} "Tags": [ ${ range(env('CONSUL_TAG_COUNT', fake.random.number(10))).map( diff --git a/ui/packages/consul-ui/mock-api/v1/internal/ui/services b/ui/packages/consul-ui/mock-api/v1/internal/ui/services index 592e4f4f6..9fb437fbc 100644 --- a/ui/packages/consul-ui/mock-api/v1/internal/ui/services +++ b/ui/packages/consul-ui/mock-api/v1/internal/ui/services @@ -47,7 +47,7 @@ ${typeof location.search.ns !== 'undefined' ? ` "Namespace": "${location.search.ns}", ` : ``} ${typeof location.search.partition !== 'undefined' ? ` - "Partition": "${location.search.ns}", + "Partition": "${fake.helpers.randomize([env('CONSUL_PARTITION_EXPORTER', location.search.partition), location.search.partition])}", ` : ``} "Tags": [ ${ diff --git a/ui/packages/consul-ui/translations/common/en-us.yaml b/ui/packages/consul-ui/translations/common/en-us.yaml index cefd9f949..fe1f462b4 100644 --- a/ui/packages/consul-ui/translations/common/en-us.yaml +++ b/ui/packages/consul-ui/translations/common/en-us.yaml @@ -46,6 +46,7 @@ search: critical: Failing in-mesh: In service mesh not-in-mesh: Not in service mesh + integrations: Integrations sort: alpha: asc: A to Z @@ -61,4 +62,4 @@ sort: desc: Shortest to longest status: asc: Unhealthy to Healthy - desc: Healthy to Unhealthy \ No newline at end of file + desc: Healthy to Unhealthy From 80422c0dfefab07b9c59666dfc1d2bee9d96abe4 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 6 Dec 2021 09:55:54 -0600 Subject: [PATCH 37/60] areas: make the gRPC server tracker network area aware (#11748) Fixes a bug whereby servers present in multiple network areas would be properly segmented in the Router, but not in the gRPC mirror. This would lead servers in the current datacenter leaving from a network area (possibly during the network area's removal) from deleting their own records that still exist in the standard WAN area. The gRPC client stack uses the gRPC server tracker to execute all RPCs, even those targeting members of the current datacenter (which is unlike the net/rpc stack which has a bypass mechanism). This would manifest as a gRPC method call never opening a socket because it would block forever waiting for the current datacenter's pool of servers to be non-empty. 
--- .changelog/11748.txt | 3 ++ agent/grpc/client_test.go | 21 +++++----- agent/grpc/resolver/resolver.go | 70 +++++++++++++++++++++++---------- agent/router/grpc.go | 13 +++--- agent/router/router.go | 10 ++--- 5 files changed, 76 insertions(+), 41 deletions(-) create mode 100644 .changelog/11748.txt diff --git a/.changelog/11748.txt b/.changelog/11748.txt new file mode 100644 index 000000000..8917ed93f --- /dev/null +++ b/.changelog/11748.txt @@ -0,0 +1,3 @@ +```release-note:bug +areas: **(Enterprise only)** make the gRPC server tracker network area aware +``` diff --git a/agent/grpc/client_test.go b/agent/grpc/client_test.go index 3fa90e218..0dae1e236 100644 --- a/agent/grpc/client_test.go +++ b/agent/grpc/client_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" ) // useTLSForDcAlwaysTrue tell GRPC to always return the TLS is enabled @@ -33,7 +34,7 @@ func TestNewDialer_WithTLSWrapper(t *testing.T) { t.Cleanup(logError(t, lis.Close)) builder := resolver.NewServerResolverBuilder(newConfig(t)) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-1", ID: "ID1", Datacenter: "dc1", @@ -84,14 +85,14 @@ func TestNewDialer_WithALPNWrapper(t *testing.T) { }() builder := resolver.NewServerResolverBuilder(newConfig(t)) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-1", ID: "ID1", Datacenter: "dc1", Addr: lis1.Addr(), UseTLS: true, }) - builder.AddServer(&metadata.Server{ + builder.AddServer(types.AreaWAN, &metadata.Server{ Name: "server-2", ID: "ID2", Datacenter: "dc2", @@ -153,7 +154,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler(t *testing.T) { srv := newTestServer(t, "server-1", "dc1", tlsConf) md := srv.Metadata() - res.AddServer(md) + res.AddServer(types.AreaWAN, md) t.Cleanup(srv.shutdown) pool := NewClientConnPool(ClientConnPoolConfig{ @@ -211,7 +212,7 @@ func TestNewDialer_IntegrationWithTLSEnabledHandler_viaMeshGateway(t *testing.T) }() md := srv.Metadata() - res.AddServer(md) + res.AddServer(types.AreaWAN, md) t.Cleanup(srv.shutdown) clientTLSConf, err := tlsutil.NewConfigurator(tlsutil.Config{ @@ -266,7 +267,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newTestServer(t, name, "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -280,7 +281,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Failover(t *testing.T) { first, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) - res.RemoveServer(&metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) + res.RemoveServer(types.AreaWAN, &metadata.Server{ID: first.ServerName, Datacenter: "dc1"}) resp, err := client.Something(ctx, &testservice.Req{}) require.NoError(t, err) @@ -302,7 +303,7 @@ func TestClientConnPool_ForwardToLeader_Failover(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newTestServer(t, name, "dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) servers = append(servers, srv) t.Cleanup(srv.shutdown) } @@ -352,7 +353,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_Rebalance(t *testing.T) { for i := 0; i < count; i++ { name := fmt.Sprintf("server-%d", i) srv := newTestServer(t, name, 
"dc1", nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } @@ -406,7 +407,7 @@ func TestClientConnPool_IntegrationWithGRPCResolver_MultiDC(t *testing.T) { for _, dc := range dcs { name := "server-0-" + dc srv := newTestServer(t, name, dc, nil) - res.AddServer(srv.Metadata()) + res.AddServer(types.AreaWAN, srv.Metadata()) t.Cleanup(srv.shutdown) } diff --git a/agent/grpc/resolver/resolver.go b/agent/grpc/resolver/resolver.go index f6c3d7fe9..e77ee568d 100644 --- a/agent/grpc/resolver/resolver.go +++ b/agent/grpc/resolver/resolver.go @@ -10,6 +10,7 @@ import ( "google.golang.org/grpc/resolver" "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" ) // ServerResolverBuilder tracks the current server list and keeps any @@ -18,9 +19,9 @@ type ServerResolverBuilder struct { cfg Config // leaderResolver is used to track the address of the leader in the local DC. leaderResolver leaderResolver - // servers is an index of Servers by Server.ID. The map contains server IDs + // servers is an index of Servers by area and Server.ID. The map contains server IDs // for all datacenters. - servers map[string]*metadata.Server + servers map[types.AreaID]map[string]*metadata.Server // resolvers is an index of connections to the serverResolver which manages // addresses of servers for that connection. resolvers map[resolver.ClientConn]*serverResolver @@ -37,7 +38,7 @@ type Config struct { func NewServerResolverBuilder(cfg Config) *ServerResolverBuilder { return &ServerResolverBuilder{ cfg: cfg, - servers: make(map[string]*metadata.Server), + servers: make(map[types.AreaID]map[string]*metadata.Server), resolvers: make(map[resolver.ClientConn]*serverResolver), } } @@ -72,9 +73,11 @@ func (s *ServerResolverBuilder) ServerForGlobalAddr(globalAddr string) (*metadat s.lock.RLock() defer s.lock.RUnlock() - for _, server := range s.servers { - if DCPrefix(server.Datacenter, server.Addr.String()) == globalAddr { - return server, nil + for _, areaServers := range s.servers { + for _, server := range areaServers { + if DCPrefix(server.Datacenter, server.Addr.String()) == globalAddr { + return server, nil + } } } return nil, fmt.Errorf("failed to find Consul server for global address %q", globalAddr) @@ -138,11 +141,17 @@ func (s *ServerResolverBuilder) Authority() string { } // AddServer updates the resolvers' states to include the new server's address. -func (s *ServerResolverBuilder) AddServer(server *metadata.Server) { +func (s *ServerResolverBuilder) AddServer(areaID types.AreaID, server *metadata.Server) { s.lock.Lock() defer s.lock.Unlock() - s.servers[uniqueID(server)] = server + areaServers, ok := s.servers[areaID] + if !ok { + areaServers = make(map[string]*metadata.Server) + s.servers[areaID] = areaServers + } + + areaServers[uniqueID(server)] = server addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { @@ -168,11 +177,19 @@ func DCPrefix(datacenter, suffix string) string { } // RemoveServer updates the resolvers' states with the given server removed. 
-func (s *ServerResolverBuilder) RemoveServer(server *metadata.Server) { +func (s *ServerResolverBuilder) RemoveServer(areaID types.AreaID, server *metadata.Server) { s.lock.Lock() defer s.lock.Unlock() - delete(s.servers, uniqueID(server)) + areaServers, ok := s.servers[areaID] + if !ok { + return // already gone + } + + delete(areaServers, uniqueID(server)) + if len(areaServers) == 0 { + delete(s.servers, areaID) + } addrs := s.getDCAddrs(server.Datacenter) for _, resolver := range s.resolvers { @@ -185,18 +202,29 @@ func (s *ServerResolverBuilder) RemoveServer(server *metadata.Server) { // getDCAddrs returns a list of the server addresses for the given datacenter. // This method requires that lock is held for reads. func (s *ServerResolverBuilder) getDCAddrs(dc string) []resolver.Address { - var addrs []resolver.Address - for _, server := range s.servers { - if server.Datacenter != dc { - continue - } + var ( + addrs []resolver.Address + keptServerIDs = make(map[string]struct{}) + ) + for _, areaServers := range s.servers { + for _, server := range areaServers { + if server.Datacenter != dc { + continue + } - addrs = append(addrs, resolver.Address{ - // NOTE: the address persisted here is only dialable using our custom dialer - Addr: DCPrefix(server.Datacenter, server.Addr.String()), - Type: resolver.Backend, - ServerName: server.Name, - }) + // Servers may be part of multiple areas, so only include each one once. + if _, ok := keptServerIDs[server.ID]; ok { + continue + } + keptServerIDs[server.ID] = struct{}{} + + addrs = append(addrs, resolver.Address{ + // NOTE: the address persisted here is only dialable using our custom dialer + Addr: DCPrefix(server.Datacenter, server.Addr.String()), + Type: resolver.Backend, + ServerName: server.Name, + }) + } } return addrs } diff --git a/agent/router/grpc.go b/agent/router/grpc.go index c4fe96d25..44600d42a 100644 --- a/agent/router/grpc.go +++ b/agent/router/grpc.go @@ -1,13 +1,16 @@ package router -import "github.com/hashicorp/consul/agent/metadata" +import ( + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" +) // ServerTracker is called when Router is notified of a server being added or // removed. type ServerTracker interface { NewRebalancer(dc string) func() - AddServer(*metadata.Server) - RemoveServer(*metadata.Server) + AddServer(types.AreaID, *metadata.Server) + RemoveServer(types.AreaID, *metadata.Server) } // Rebalancer is called periodically to re-order the servers so that the load on the @@ -24,7 +27,7 @@ func (NoOpServerTracker) NewRebalancer(string) func() { } // AddServer does nothing -func (NoOpServerTracker) AddServer(*metadata.Server) {} +func (NoOpServerTracker) AddServer(types.AreaID, *metadata.Server) {} // RemoveServer does nothing -func (NoOpServerTracker) RemoveServer(*metadata.Server) {} +func (NoOpServerTracker) RemoveServer(types.AreaID, *metadata.Server) {} diff --git a/agent/router/router.go b/agent/router/router.go index 9aaae8739..1389a30f6 100644 --- a/agent/router/router.go +++ b/agent/router/router.go @@ -175,7 +175,7 @@ func (r *Router) AddArea(areaID types.AreaID, cluster RouterSerfCluster, pinger continue } - if err := r.addServer(area, parts); err != nil { + if err := r.addServer(areaID, area, parts); err != nil { return fmt.Errorf("failed to add server %q to area %q: %v", m.Name, areaID, err) } } @@ -276,7 +276,7 @@ func (r *Router) maybeInitializeManager(area *areaInfo, dc string) *Manager { } // addServer does the work of AddServer once the write lock is held. 
-func (r *Router) addServer(area *areaInfo, s *metadata.Server) error { +func (r *Router) addServer(areaID types.AreaID, area *areaInfo, s *metadata.Server) error { // Make the manager on the fly if this is the first we've seen of it, // and add it to the index. manager := r.maybeInitializeManager(area, s.Datacenter) @@ -288,7 +288,7 @@ func (r *Router) addServer(area *areaInfo, s *metadata.Server) error { } manager.AddServer(s) - r.grpcServerTracker.AddServer(s) + r.grpcServerTracker.AddServer(areaID, s) return nil } @@ -302,7 +302,7 @@ func (r *Router) AddServer(areaID types.AreaID, s *metadata.Server) error { if !ok { return fmt.Errorf("area ID %q does not exist", areaID) } - return r.addServer(area, s) + return r.addServer(areaID, area, s) } // RemoveServer should be called whenever a server is removed from an area. This @@ -324,7 +324,7 @@ func (r *Router) RemoveServer(areaID types.AreaID, s *metadata.Server) error { return nil } info.manager.RemoveServer(s) - r.grpcServerTracker.RemoveServer(s) + r.grpcServerTracker.RemoveServer(areaID, s) // If this manager is empty then remove it so we don't accumulate cruft // and waste time during request routing. From 08b100c1b16dd5af8c629d4f42e3415a349d7cdf Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 16:09:15 +0000 Subject: [PATCH 38/60] ui: Stop tables overlapping with their headers when scrolling (#11670) --- .changelog/11670.txt | 3 ++ .../consul/intention/list/README.mdx | 36 +++++++++++++++++++ .../components/tabular-collection/index.scss | 2 +- 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 .changelog/11670.txt create mode 100644 ui/packages/consul-ui/app/components/consul/intention/list/README.mdx diff --git a/.changelog/11670.txt b/.changelog/11670.txt new file mode 100644 index 000000000..f8f4fa5b6 --- /dev/null +++ b/.changelog/11670.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix visual issue with slight table header overflow +``` diff --git a/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx b/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx new file mode 100644 index 000000000..67f5c0695 --- /dev/null +++ b/ui/packages/consul-ui/app/components/consul/intention/list/README.mdx @@ -0,0 +1,36 @@ +# Consul::Intention::List + +A component for rendering Intentions. + + +There are some extra conextual components to use here due to how we detect +intention CRDs and make that easy to work with/add necessary notices. The +notice will only show if applicable, but the contextual component is used to +define where that is when it does display. 
+ +```hbs preview-template + + + + + + + + +``` + +## Arguments + +| Argument/Attribute | Type | Default | Description | +| --- | --- | --- | --- | +| `items` | `array` | | An array of Intentions | +| `ondelete` | `function` | | An action to execute when the `Delete` action is clicked | + +## See + +- [Template Source Code](./index.hbs) + +--- diff --git a/ui/packages/consul-ui/app/components/tabular-collection/index.scss b/ui/packages/consul-ui/app/components/tabular-collection/index.scss index 300533879..c46301cc5 100644 --- a/ui/packages/consul-ui/app/components/tabular-collection/index.scss +++ b/ui/packages/consul-ui/app/components/tabular-collection/index.scss @@ -5,7 +5,7 @@ table.dom-recycling { %dom-recycling-table tbody { /* tbodys are all absolute so,*/ /* make room for the header */ - top: 29px !important; + top: 33px !important; /* Make room for the header, plus 20px for a margin on the bottom */ width: 100%; } From eaf4b64833ab7637a4e5c0293394ebcb4b1e833f Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 16:10:08 +0000 Subject: [PATCH 39/60] ui: remove old head.hbs file we don't need (#11678) --- ui/packages/consul-ui/app/templates/head.hbs | 1 - 1 file changed, 1 deletion(-) delete mode 100644 ui/packages/consul-ui/app/templates/head.hbs diff --git a/ui/packages/consul-ui/app/templates/head.hbs b/ui/packages/consul-ui/app/templates/head.hbs deleted file mode 100644 index d2eda4f16..000000000 --- a/ui/packages/consul-ui/app/templates/head.hbs +++ /dev/null @@ -1 +0,0 @@ -{{model.title}} From 8c8443390d593937a5d97ce3149274787de6abf5 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 6 Dec 2021 16:11:57 +0000 Subject: [PATCH 40/60] ui: Improve error messaging for when we can't make a slug (#11697) Ember Data requires the usage of unique ID to identify its records in the frontend, and we use a centralized function to do that for all records. There are occasions where it can't make an ID, usually this is a bug our side, but there are occasions where Consul might not be giving us the data needed to make an ID, for example if a Service comes down to us with a blank Name. Whilst this isn't a problem to be fixed in the UI, I thought we could make an improvement here by giving a little more info as to why the UI cannot make a unique ID. This is currently semi-hidden away in the javascript console, but we could potentially surface this in the UI itself as a larger task. I figured this smaller task could help folks in the meantime if they hit upon this as they might open up the javascript console themselves to see whats up and they'd at least get this extra clue. --- .../consul-ui/app/utils/create-fingerprinter.js | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ui/packages/consul-ui/app/utils/create-fingerprinter.js b/ui/packages/consul-ui/app/utils/create-fingerprinter.js index 263b349f2..d0f103dcf 100644 --- a/ui/packages/consul-ui/app/utils/create-fingerprinter.js +++ b/ui/packages/consul-ui/app/utils/create-fingerprinter.js @@ -5,14 +5,19 @@ export default function(foreignKey, nspaceKey, partitionKey, hash = JSON.stringi return function(item) { foreignKeyValue = foreignKeyValue == null ? item[foreignKey] : foreignKeyValue; if (foreignKeyValue == null) { - throw new Error('Unable to create fingerprint, missing foreignKey value'); + throw new Error( + `Unable to create fingerprint, missing foreignKey value. 
Looking for value in \`${foreignKey}\` got \`${foreignKeyValue}\`` + ); } const slugKeys = slugKey.split(','); const slugValues = slugKeys.map(function(slugKey) { - if (get(item, slugKey) == null || get(item, slugKey).length < 1) { - throw new Error('Unable to create fingerprint, missing slug'); + const slug = get(item, slugKey); + if (slug == null || slug.length < 1) { + throw new Error( + `Unable to create fingerprint, missing slug. Looking for value in \`${slugKey}\` got \`${slug}\`` + ); } - return get(item, slugKey); + return slug; }); // This ensures that all data objects have a Namespace and a Partition // value set, even in OSS. From 5ea4b82940ea116895efe681686a0714cb0a66e8 Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 6 Dec 2021 13:18:02 -0600 Subject: [PATCH 41/60] light refactors to support making partitions and serf-based wan federation are mutually exclusive (#11755) --- .changelog/_1391.txt | 3 +++ agent/consul/enterprise_server_oss.go | 2 +- agent/consul/merge.go | 30 +++++++++++++++++++++- agent/consul/merge_test.go | 36 +++++++++++++++++++++++++-- agent/consul/server.go | 16 ++++++++++-- agent/consul/server_oss.go | 4 +++ agent/consul/server_serf.go | 16 +++++++++--- agent/router/serf_adapter.go | 34 ++++++++++++++++++++++--- 8 files changed, 128 insertions(+), 13 deletions(-) create mode 100644 .changelog/_1391.txt diff --git a/.changelog/_1391.txt b/.changelog/_1391.txt new file mode 100644 index 000000000..f1dbe4909 --- /dev/null +++ b/.changelog/_1391.txt @@ -0,0 +1,3 @@ +```release-note:feature +partitions: **(Enterprise only)** Ensure partitions and serf-based WAN federation are mutually exclusive. +``` diff --git a/agent/consul/enterprise_server_oss.go b/agent/consul/enterprise_server_oss.go index 85c1b26f4..cad141c11 100644 --- a/agent/consul/enterprise_server_oss.go +++ b/agent/consul/enterprise_server_oss.go @@ -88,7 +88,7 @@ func (s *Server) validateEnterpriseIntentionNamespace(ns string, _ bool) error { func (s *Server) setupSerfLAN(config *Config) error { var err error // Initialize the LAN Serf for the default network segment. - s.serfLAN, err = s.setupSerf(setupSerfOptions{ + s.serfLAN, _, err = s.setupSerf(setupSerfOptions{ Config: config.SerfLANConfig, EventCh: s.eventChLAN, SnapshotPath: serfLANSnapshot, diff --git a/agent/consul/merge.go b/agent/consul/merge.go index 04a41f0f5..306305881 100644 --- a/agent/consul/merge.go +++ b/agent/consul/merge.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + "sync" "github.com/hashicorp/go-version" "github.com/hashicorp/serf/serf" @@ -86,14 +87,41 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error { // ring. We check that the peers are server nodes and abort the merge // otherwise. type wanMergeDelegate struct { + localDatacenter string + + federationDisabledLock sync.Mutex + federationDisabled bool +} + +// SetWANFederationDisabled selectively disables the wan pool from accepting +// non-local members. If the toggle changed the current value it returns true. 
+func (md *wanMergeDelegate) SetWANFederationDisabled(disabled bool) bool { + md.federationDisabledLock.Lock() + prior := md.federationDisabled + md.federationDisabled = disabled + md.federationDisabledLock.Unlock() + + return prior != disabled } func (md *wanMergeDelegate) NotifyMerge(members []*serf.Member) error { + // Deliberately hold this lock during the entire merge so calls to + // SetWANFederationDisabled returning immediately imply that the flag takes + // effect for all future merges. + md.federationDisabledLock.Lock() + defer md.federationDisabledLock.Unlock() + for _, m := range members { - ok, _ := metadata.IsConsulServer(*m) + ok, srv := metadata.IsConsulServer(*m) if !ok { return fmt.Errorf("Member '%s' is not a server", m.Name) } + + if md.federationDisabled { + if srv.Datacenter != md.localDatacenter { + return fmt.Errorf("Member '%s' part of wrong datacenter '%s'; WAN federation is disabled", m.Name, srv.Datacenter) + } + } } return nil } diff --git a/agent/consul/merge_test.go b/agent/consul/merge_test.go index 7219edcab..1a8c57bd8 100644 --- a/agent/consul/merge_test.go +++ b/agent/consul/merge_test.go @@ -138,10 +138,16 @@ func TestMerge_WAN(t *testing.T) { type testcase struct { members []*serf.Member expect string + setupFn func(t *testing.T, delegate *wanMergeDelegate) } run := func(t *testing.T, tc testcase) { - delegate := &wanMergeDelegate{} + delegate := &wanMergeDelegate{ + localDatacenter: "dc1", + } + if tc.setupFn != nil { + tc.setupFn(t, delegate) + } err := delegate.NotifyMerge(tc.members) if tc.expect == "" { require.NoError(t, err) @@ -177,7 +183,33 @@ func TestMerge_WAN(t *testing.T) { build: "0.7.5", }), }, - expect: "", + }, + "federation disabled and local join allowed": { + setupFn: func(t *testing.T, delegate *wanMergeDelegate) { + delegate.SetWANFederationDisabled(true) + }, + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc1", + name: "node1", + server: true, + build: "0.7.5", + }), + }, + }, + "federation disabled and remote join blocked": { + setupFn: func(t *testing.T, delegate *wanMergeDelegate) { + delegate.SetWANFederationDisabled(true) + }, + members: []*serf.Member{ + makeTestNode(t, testMember{ + dc: "dc2", + name: "node1", + server: true, + build: "0.7.5", + }), + }, + expect: `WAN federation is disabled`, }, } diff --git a/agent/consul/server.go b/agent/consul/server.go index 554d5cbe5..cf2b0125c 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -189,6 +189,12 @@ type Server struct { // serf cluster that spans datacenters eventChWAN chan serf.Event + // wanMembershipNotifyCh is used to receive notifications that the the + // serfWAN wan pool may have changed. + // + // If this is nil, notification is skipped. + wanMembershipNotifyCh chan struct{} + // fsm is the state machine used with Raft to provide // strong consistency. 
fsm *fsm.FSM @@ -266,6 +272,7 @@ type Server struct { // serfWAN is the Serf cluster maintained between DC's // which SHOULD only consist of Consul servers serfWAN *serf.Serf + serfWANConfig *serf.Config memberlistTransportWAN wanfed.IngestionAwareTransport gatewayLocator *GatewayLocator @@ -493,7 +500,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) { // Initialize the WAN Serf if enabled if config.SerfWANConfig != nil { - s.serfWAN, err = s.setupSerf(setupSerfOptions{ + s.serfWAN, s.serfWANConfig, err = s.setupSerf(setupSerfOptions{ Config: config.SerfWANConfig, EventCh: s.eventChWAN, SnapshotPath: serfWANSnapshot, @@ -548,7 +555,7 @@ func NewServer(config *Config, flat Deps) (*Server, error) { s.Shutdown() return nil, fmt.Errorf("Failed to add WAN serf route: %v", err) } - go router.HandleSerfEvents(s.logger, s.router, types.AreaWAN, s.serfWAN.ShutdownCh(), s.eventChWAN) + go router.HandleSerfEvents(s.logger, s.router, types.AreaWAN, s.serfWAN.ShutdownCh(), s.eventChWAN, s.wanMembershipNotifyCh) // Fire up the LAN <-> WAN join flooder. addrFn := func(s *metadata.Server) (string, error) { @@ -1124,6 +1131,11 @@ func (s *Server) JoinWAN(addrs []string) (int, error) { if s.serfWAN == nil { return 0, ErrWANFederationDisabled } + + if err := s.enterpriseValidateJoinWAN(); err != nil { + return 0, err + } + return s.serfWAN.Join(addrs, true) } diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index f6217b999..7d4830d1b 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -19,6 +19,10 @@ import ( func (s *Server) registerEnterpriseGRPCServices(deps Deps, srv *grpc.Server) {} +func (s *Server) enterpriseValidateJoinWAN() error { + return nil // no-op +} + // JoinLAN is used to have Consul join the inner-DC pool The target address // should be another node inside the DC listening on the Serf LAN address func (s *Server) JoinLAN(addrs []string, entMeta *structs.EnterpriseMeta) (int, error) { diff --git a/agent/consul/server_serf.go b/agent/consul/server_serf.go index f5864f654..44c3f857a 100644 --- a/agent/consul/server_serf.go +++ b/agent/consul/server_serf.go @@ -48,12 +48,18 @@ type setupSerfOptions struct { } // setupSerf is used to setup and initialize a Serf -func (s *Server) setupSerf(opts setupSerfOptions) (*serf.Serf, error) { +func (s *Server) setupSerf(opts setupSerfOptions) (*serf.Serf, *serf.Config, error) { conf, err := s.setupSerfConfig(opts) if err != nil { - return nil, err + return nil, nil, err } - return serf.Create(conf) + + cluster, err := serf.Create(conf) + if err != nil { + return nil, nil, err + } + + return cluster, conf, nil } func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) { @@ -152,7 +158,9 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) { conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion] conf.RejoinAfterLeave = s.config.RejoinAfterLeave if opts.WAN { - conf.Merge = &wanMergeDelegate{} + conf.Merge = &wanMergeDelegate{ + localDatacenter: s.config.Datacenter, + } } else { conf.Merge = &lanMergeDelegate{ dc: s.config.Datacenter, diff --git a/agent/router/serf_adapter.go b/agent/router/serf_adapter.go index b051b2f96..7208fe123 100644 --- a/agent/router/serf_adapter.go +++ b/agent/router/serf_adapter.go @@ -1,10 +1,11 @@ package router import ( - "github.com/hashicorp/consul/agent/metadata" - "github.com/hashicorp/consul/types" "github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/serf" + + 
"github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/types" ) // routerFn selects one of the router operations to map to incoming Serf events. @@ -50,7 +51,18 @@ func handleMemberEvent(logger hclog.Logger, fn routerFn, areaID types.AreaID, e // HandleSerfEvents is a long-running goroutine that pushes incoming events from // a Serf manager's channel into the given router. This will return when the // shutdown channel is closed. -func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, shutdownCh <-chan struct{}, eventCh <-chan serf.Event) { +// +// If membershipNotifyCh is non-nil, it must be a buffered channel of size one +// with one consumer. That consumer will be notified when +// Join/Leave/Failed/Update occur on this serf pool. +func HandleSerfEvents( + logger hclog.Logger, + router *Router, + areaID types.AreaID, + shutdownCh <-chan struct{}, + eventCh <-chan serf.Event, + membershipNotifyCh chan<- struct{}, +) { for { select { case <-shutdownCh: @@ -60,15 +72,19 @@ func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, switch e.EventType() { case serf.EventMemberJoin: handleMemberEvent(logger, router.AddServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberLeave, serf.EventMemberReap: handleMemberEvent(logger, router.RemoveServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberFailed: handleMemberEvent(logger, router.FailServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) case serf.EventMemberUpdate: handleMemberEvent(logger, router.AddServer, areaID, e) + notifyMembershipPossibleChange(membershipNotifyCh) // All of these event types are ignored. case serf.EventUser: @@ -80,3 +96,15 @@ func HandleSerfEvents(logger hclog.Logger, router *Router, areaID types.AreaID, } } } + +func notifyMembershipPossibleChange(membershipNotifyCh chan<- struct{}) { + if membershipNotifyCh == nil { + return + } + + // Notify if not already notified. 
+ select { + case membershipNotifyCh <- struct{}{}: + default: + } +} From a1c1e36be79e0bfd8cb1928da7a196e89e75e2a1 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 12 Nov 2021 18:57:05 -0700 Subject: [PATCH 42/60] Allow cross-partition references in disco chain * Add partition fields to targets like service route destinations * Update validation to prevent cross-DC + cross-partition references * Handle partitions when reading config entries for disco chain * Encode partition in compiled targets --- agent/config/runtime_test.go | 2 + agent/consul/discoverychain/compile.go | 26 +-- agent/consul/discoverychain/compile_test.go | 156 +++++++++--------- agent/consul/state/config_entry.go | 46 ++++-- agent/consul/state/config_entry_test.go | 7 + agent/structs/config_entry_discoverychain.go | 97 ++++++++--- .../config_entry_discoverychain_test.go | 2 +- test/integration/connect/envoy/run-tests.sh | 16 ++ 8 files changed, 228 insertions(+), 124 deletions(-) diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 8abbcc403..e92fb9855 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/consul" @@ -4085,6 +4086,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { Service: "carrot", ServiceSubset: "kale", Namespace: "leek", + Partition: acl.DefaultPartitionName, PrefixRewrite: "/alternate", RequestTimeout: 99 * time.Second, NumRetries: 12345, diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index 99f4c357d..c795ef88c 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -185,7 +185,7 @@ type customizationMarkers struct { // the String() method on the type itself. It is this way to be more // consistent with other string ids within the discovery chain. func serviceIDString(sid structs.ServiceID) string { - return fmt.Sprintf("%s.%s", sid.ID, sid.NamespaceOrDefault()) + return fmt.Sprintf("%s.%s.%s", sid.ID, sid.NamespaceOrDefault(), sid.PartitionOrDefault()) } func (m *customizationMarkers) IsZero() bool { @@ -213,10 +213,10 @@ func (c *compiler) recordServiceProtocol(sid structs.ServiceID) error { if serviceDefault := c.entries.GetService(sid); serviceDefault != nil { return c.recordProtocol(sid, serviceDefault.Protocol) } - if c.entries.GlobalProxy != nil { + if proxyDefault := c.entries.GetProxyDefaults(sid.PartitionOrDefault()); proxyDefault != nil { var cfg proxyConfig // Ignore errors and fallback on defaults if it does happen. - _ = mapstructure.WeakDecode(c.entries.GlobalProxy.Config, &cfg) + _ = mapstructure.WeakDecode(proxyDefault.Config, &cfg) if cfg.Protocol != "" { return c.recordProtocol(sid, cfg.Protocol) } @@ -567,11 +567,12 @@ func (c *compiler) assembleChain() error { dest = &structs.ServiceRouteDestination{ Service: c.serviceName, Namespace: router.NamespaceOrDefault(), + Partition: router.PartitionOrDefault(), } } svc := defaultIfEmpty(dest.Service, c.serviceName) destNamespace := defaultIfEmpty(dest.Namespace, router.NamespaceOrDefault()) - destPartition := router.PartitionOrDefault() + destPartition := defaultIfEmpty(dest.Partition, router.PartitionOrDefault()) // Check to see if the destination is eligible for splitting. 
var ( @@ -602,7 +603,7 @@ func (c *compiler) assembleChain() error { } defaultRoute := &structs.DiscoveryRoute{ - Definition: newDefaultServiceRoute(router.Name, router.NamespaceOrDefault()), + Definition: newDefaultServiceRoute(router.Name, router.NamespaceOrDefault(), router.PartitionOrDefault()), NextNode: defaultDestinationNode.MapKey(), } routeNode.Routes = append(routeNode.Routes, defaultRoute) @@ -613,7 +614,7 @@ func (c *compiler) assembleChain() error { return nil } -func newDefaultServiceRoute(serviceName string, namespace string) *structs.ServiceRoute { +func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.ServiceRoute { return &structs.ServiceRoute{ Match: &structs.ServiceRouteMatch{ HTTP: &structs.ServiceRouteHTTPMatch{ @@ -623,6 +624,7 @@ func newDefaultServiceRoute(serviceName string, namespace string) *structs.Servi Destination: &structs.ServiceRouteDestination{ Service: serviceName, Namespace: namespace, + Partition: partition, }, } } @@ -836,7 +838,7 @@ RESOLVE_AGAIN: target, redirect.Service, redirect.ServiceSubset, - target.Partition, + redirect.Partition, redirect.Namespace, redirect.Datacenter, ) @@ -940,9 +942,9 @@ RESOLVE_AGAIN: if serviceDefault := c.entries.GetService(targetID); serviceDefault != nil { target.MeshGateway = serviceDefault.MeshGateway } - - if c.entries.GlobalProxy != nil && target.MeshGateway.Mode == structs.MeshGatewayModeDefault { - target.MeshGateway.Mode = c.entries.GlobalProxy.MeshGateway.Mode + proxyDefault := c.entries.GetProxyDefaults(targetID.PartitionOrDefault()) + if proxyDefault != nil && target.MeshGateway.Mode == structs.MeshGatewayModeDefault { + target.MeshGateway.Mode = proxyDefault.MeshGateway.Mode } if c.overrideMeshGateway.Mode != structs.MeshGatewayModeDefault { @@ -987,7 +989,7 @@ RESOLVE_AGAIN: target, failover.Service, failover.ServiceSubset, - target.Partition, + failover.Partition, failover.Namespace, dc, ) @@ -1001,7 +1003,7 @@ RESOLVE_AGAIN: target, failover.Service, failover.ServiceSubset, - target.Partition, + failover.Partition, failover.Namespace, "", ) diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 360b40e48..1cd575815 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -158,14 +158,14 @@ func testcase_JustRouterWithDefaults() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -210,11 +210,11 @@ func testcase_JustRouterWithNoDestination() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &structs.ServiceRoute{ @@ -227,7 +227,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase 
{ NextNode: "resolver:main.default.default.dc1", }, { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -270,14 +270,14 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -321,21 +321,21 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: "splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -386,21 +386,21 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: "splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -458,21 +458,21 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { - Definition: newDefaultServiceRoute("main", "default"), - NextNode: "splitter:main.default", + Definition: newDefaultServiceRoute("main", "default", "default"), + NextNode: "splitter:main.default.default", }, }, }, - "splitter:main.default": { + "splitter:main.default.default": 
{ Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -542,18 +542,18 @@ func testcase_RouteBypassesSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &router.Routes[0], NextNode: "resolver:bypass.other.default.default.dc1", }, { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:main.default.default.dc1", }, }, @@ -605,11 +605,11 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -661,11 +661,11 @@ func testcase_NoopSplit_WithResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -724,11 +724,11 @@ func testcase_SubsetSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -801,11 +801,11 @@ func testcase_ServiceSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -898,11 +898,11 @@ func testcase_SplitBypassesSplit() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1053,13 +1053,14 @@ func testcase_DatacenterRedirect() compileTestCase { func 
testcase_DatacenterRedirect_WithMeshGateways() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) + entries.AddResolvers( &structs.ServiceResolverConfigEntry{ Kind: "service-resolver", @@ -1300,13 +1301,15 @@ func testcase_DatacenterFailover() compileTestCase { func testcase_DatacenterFailover_WithMeshGateways() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) + entries.AddResolvers( &structs.ServiceResolverConfigEntry{ Kind: "service-resolver", @@ -1384,11 +1387,11 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1446,7 +1449,8 @@ func testcase_DefaultResolver() compileTestCase { func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { entries := newEntries() - entries.GlobalProxy = &structs.ProxyConfigEntry{ + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, Config: map[string]interface{}{ @@ -1455,7 +1459,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { MeshGateway: structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, }, - } + }) expect := &structs.CompiledDiscoveryChain{ Protocol: "grpc", @@ -1699,11 +1703,11 @@ func testcase_MultiDatacenterCanary() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -1880,11 +1884,11 @@ func testcase_AllBellsAndWhistles() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "router:main.default", + StartNode: "router:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "router:main.default": { + "router:main.default.default": { Type: structs.DiscoveryGraphNodeTypeRouter, - Name: "main.default", + Name: "main.default.default", Routes: []*structs.DiscoveryRoute{ { Definition: &router.Routes[0], @@ -1892,17 +1896,17 @@ func testcase_AllBellsAndWhistles() compileTestCase { }, { Definition: &router.Routes[1], - NextNode: "splitter:svc-split.default", + NextNode: "splitter:svc-split.default.default", }, { - Definition: newDefaultServiceRoute("main", "default"), + Definition: newDefaultServiceRoute("main", "default", "default"), NextNode: "resolver:default-subset.main.default.default.dc1", }, }, }, - "splitter:svc-split.default": { + 
"splitter:svc-split.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "svc-split.default", + Name: "svc-split.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -2455,11 +2459,11 @@ func testcase_LBSplitterAndResolver() compileTestCase { expect := &structs.CompiledDiscoveryChain{ Protocol: "http", - StartNode: "splitter:main.default", + StartNode: "splitter:main.default.default", Nodes: map[string]*structs.DiscoveryGraphNode{ - "splitter:main.default": { + "splitter:main.default.default": { Type: structs.DiscoveryGraphNodeTypeSplitter, - Name: "main.default", + Name: "main.default.default", Splits: []*structs.DiscoverySplit{ { Definition: &structs.ServiceSplit{ @@ -2642,13 +2646,13 @@ func newSimpleRoute(name string, muts ...func(*structs.ServiceRoute)) structs.Se } func setGlobalProxyProtocol(entries *structs.DiscoveryChainConfigEntries, protocol string) { - entries.GlobalProxy = &structs.ProxyConfigEntry{ + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ Kind: structs.ProxyDefaults, Name: structs.ProxyConfigGlobal, Config: map[string]interface{}{ "protocol": protocol, }, - } + }) } func setServiceProtocol(entries *structs.DiscoveryChainConfigEntries, name, protocol string) { diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index 594e49e1a..434e776d4 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -880,24 +880,21 @@ func readDiscoveryChainConfigEntriesTxn( sid := structs.NewServiceID(serviceName, entMeta) - // Grab the proxy defaults if they exist. - idx, proxy, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, overrides, entMeta) - if err != nil { - return 0, nil, err - } else if proxy != nil { - res.GlobalProxy = proxy - } - - // At every step we'll need service defaults. + // At every step we'll need service and proxy defaults. todoDefaults[sid] = struct{}{} + var maxIdx uint64 + // first fetch the router, of which we only collect 1 per chain eval - _, router, err := getRouterConfigEntryTxn(tx, ws, serviceName, overrides, entMeta) + idx, router, err := getRouterConfigEntryTxn(tx, ws, serviceName, overrides, entMeta) if err != nil { return 0, nil, err } else if router != nil { res.Routers[sid] = router } + if idx > maxIdx { + maxIdx = idx + } if router != nil { for _, svc := range router.ListRelatedServices() { @@ -922,10 +919,13 @@ func readDiscoveryChainConfigEntriesTxn( // Yes, even for splitters. todoDefaults[splitID] = struct{}{} - _, splitter, err := getSplitterConfigEntryTxn(tx, ws, splitID.ID, overrides, &splitID.EnterpriseMeta) + idx, splitter, err := getSplitterConfigEntryTxn(tx, ws, splitID.ID, overrides, &splitID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if splitter == nil { res.Splitters[splitID] = nil @@ -959,10 +959,13 @@ func readDiscoveryChainConfigEntriesTxn( // And resolvers, too. 
todoDefaults[resolverID] = struct{}{} - _, resolver, err := getResolverConfigEntryTxn(tx, ws, resolverID.ID, overrides, &resolverID.EnterpriseMeta) + idx, resolver, err := getResolverConfigEntryTxn(tx, ws, resolverID.ID, overrides, &resolverID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if resolver == nil { res.Resolvers[resolverID] = nil @@ -987,16 +990,31 @@ func readDiscoveryChainConfigEntriesTxn( continue // already fetched } - _, entry, err := getServiceConfigEntryTxn(tx, ws, svcID.ID, overrides, &svcID.EnterpriseMeta) + if _, ok := res.ProxyDefaults[svcID.PartitionOrDefault()]; !ok { + idx, proxy, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, overrides, &svcID.EnterpriseMeta) + if err != nil { + return 0, nil, err + } + if idx > maxIdx { + maxIdx = idx + } + if proxy != nil { + res.ProxyDefaults[proxy.PartitionOrDefault()] = proxy + } + } + + idx, entry, err := getServiceConfigEntryTxn(tx, ws, svcID.ID, overrides, &svcID.EnterpriseMeta) if err != nil { return 0, nil, err } + if idx > maxIdx { + maxIdx = idx + } if entry == nil { res.Services[svcID] = nil continue } - res.Services[svcID] = entry } diff --git a/agent/consul/state/config_entry_test.go b/agent/consul/state/config_entry_test.go index b47d9a356..daddc9bb8 100644 --- a/agent/consul/state/config_entry_test.go +++ b/agent/consul/state/config_entry_test.go @@ -1347,6 +1347,13 @@ func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []Config &entry.EnterpriseMeta, )) } + for _, entry := range entrySet.ProxyDefaults { + out = append(out, NewConfigEntryKindName( + entry.Kind, + entry.Name, + &entry.EnterpriseMeta, + )) + } return out } diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 35287f442..37b204057 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -119,6 +119,9 @@ func (e *ServiceRouterConfigEntry) Normalize() error { if route.Destination != nil && route.Destination.Namespace == "" { route.Destination.Namespace = e.EnterpriseMeta.NamespaceOrEmpty() } + if route.Destination != nil && route.Destination.Partition == "" { + route.Destination.Partition = e.EnterpriseMeta.PartitionOrEmpty() + } } return nil @@ -381,6 +384,13 @@ type ServiceRouteDestination struct { // splitting. Namespace string `json:",omitempty"` + // Partition is the partition to resolve the service from instead of the + // current partition. If empty the current partition is assumed. + // + // If this field is specified then this route is ineligible for further + // splitting. + Partition string `json:",omitempty"` + // PrefixRewrite allows for the proxied request to have its matching path // prefix modified before being sent to the destination. Described more // below in the envoy implementation section. @@ -557,8 +567,8 @@ func (e *ServiceSplitterConfigEntry) Validate() error { } if _, ok := found[splitKey]; ok { return fmt.Errorf( - "split destination occurs more than once: service=%q, subset=%q, namespace=%q", - splitKey.Service, splitKey.ServiceSubset, splitKey.Namespace, + "split destination occurs more than once: service=%q, subset=%q, namespace=%q, partition=%q", + splitKey.Service, splitKey.ServiceSubset, splitKey.Namespace, splitKey.Partition, ) } found[splitKey] = struct{}{} @@ -665,7 +675,12 @@ type ServiceSplit struct { // splitting. Namespace string `json:",omitempty"` - // NOTE: Partition is not represented here by design. Do not add it. 
+ // Partition is the partition to resolve the service from instead of the + // current partition. If empty the current partition is assumed (optional). + // + // If this field is specified then this route is ineligible for further + // splitting. + Partition string `json:",omitempty"` // NOTE: Any configuration added to Splits that needs to be passed to the // proxy needs special handling MergeParent below. @@ -930,9 +945,13 @@ func (e *ServiceResolverConfigEntry) Validate() error { } if e.Redirect != nil { - if e.PartitionOrEmpty() != acl.DefaultPartitionName && e.Redirect.Datacenter != "" { - return fmt.Errorf("Cross datacenters redirect is not allowed for non default partition") + if !e.InDefaultPartition() && e.Redirect.Datacenter != "" { + return fmt.Errorf("Cross-datacenter redirect is not supported in non-default partitions") } + if PartitionOrDefault(e.Redirect.Partition) != e.PartitionOrDefault() && e.Redirect.Datacenter != "" { + return fmt.Errorf("Cross-datacenter and cross-partition redirect is not supported") + } + r := e.Redirect if len(e.Failover) > 0 { @@ -941,7 +960,7 @@ func (e *ServiceResolverConfigEntry) Validate() error { // TODO(rb): prevent subsets and default subsets from being defined? - if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Datacenter == "" { + if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" { return fmt.Errorf("Redirect is empty") } @@ -952,6 +971,9 @@ func (e *ServiceResolverConfigEntry) Validate() error { if r.Namespace != "" { return fmt.Errorf("Redirect.Namespace defined without Redirect.Service") } + if r.Partition != "" { + return fmt.Errorf("Redirect.Partition defined without Redirect.Service") + } } else if r.Service == e.Name { if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) { return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service) @@ -962,15 +984,19 @@ func (e *ServiceResolverConfigEntry) Validate() error { if len(e.Failover) > 0 { for subset, f := range e.Failover { - if e.PartitionOrEmpty() != acl.DefaultPartitionName && len(f.Datacenters) != 0 { - return fmt.Errorf("Cross datacenters failover is not allowed for non default partition") + if !e.InDefaultPartition() && len(f.Datacenters) != 0 { + return fmt.Errorf("Cross-datacenter failover is not supported in non-default partitions") } + if PartitionOrDefault(f.Partition) != e.PartitionOrDefault() && len(f.Datacenters) != 0 { + return fmt.Errorf("Cross-datacenter and cross-partition failover is not supported") + } + if subset != "*" && !isSubset(subset) { return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset) } - if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 { - return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, or Datacenters is required", subset) + if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && f.Partition == "" && len(f.Datacenters) == 0 { + return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, Partition, or Datacenters is required", subset) } if f.ServiceSubset != "" { @@ -1141,6 +1167,10 @@ type ServiceResolverRedirect struct { // current one (optional). Namespace string `json:",omitempty"` + // Partition is the partition to resolve the service from instead of the + // current one (optional). 
+ Partition string `json:",omitempty"` + // Datacenter is the datacenter to resolve the service from instead of the // current one (optional). Datacenter string `json:",omitempty"` @@ -1172,6 +1202,13 @@ type ServiceResolverFailover struct { // This is a DESTINATION during failover. Namespace string `json:",omitempty"` + // Partition is the partition to resolve the requested service from to form + // the failover group of instances. If empty the current partition is used + // (optional). + // + // This is a DESTINATION during failover. + Partition string `json:",omitempty"` + // Datacenters is a fixed list of datacenters to try. We never try a // datacenter multiple times, so those are subtracted from this list before // proceeding. @@ -1309,19 +1346,20 @@ func canWriteDiscoveryChain(entry discoveryChainConfigEntry, authz acl.Authorize // DiscoveryChainConfigEntries wraps just the raw cross-referenced config // entries. None of these are defaulted. type DiscoveryChainConfigEntries struct { - Routers map[ServiceID]*ServiceRouterConfigEntry - Splitters map[ServiceID]*ServiceSplitterConfigEntry - Resolvers map[ServiceID]*ServiceResolverConfigEntry - Services map[ServiceID]*ServiceConfigEntry - GlobalProxy *ProxyConfigEntry + Routers map[ServiceID]*ServiceRouterConfigEntry + Splitters map[ServiceID]*ServiceSplitterConfigEntry + Resolvers map[ServiceID]*ServiceResolverConfigEntry + Services map[ServiceID]*ServiceConfigEntry + ProxyDefaults map[string]*ProxyConfigEntry } func NewDiscoveryChainConfigEntries() *DiscoveryChainConfigEntries { return &DiscoveryChainConfigEntries{ - Routers: make(map[ServiceID]*ServiceRouterConfigEntry), - Splitters: make(map[ServiceID]*ServiceSplitterConfigEntry), - Resolvers: make(map[ServiceID]*ServiceResolverConfigEntry), - Services: make(map[ServiceID]*ServiceConfigEntry), + Routers: make(map[ServiceID]*ServiceRouterConfigEntry), + Splitters: make(map[ServiceID]*ServiceSplitterConfigEntry), + Resolvers: make(map[ServiceID]*ServiceResolverConfigEntry), + Services: make(map[ServiceID]*ServiceConfigEntry), + ProxyDefaults: make(map[string]*ProxyConfigEntry), } } @@ -1353,6 +1391,13 @@ func (e *DiscoveryChainConfigEntries) GetService(sid ServiceID) *ServiceConfigEn return nil } +func (e *DiscoveryChainConfigEntries) GetProxyDefaults(partition string) *ProxyConfigEntry { + if e.ProxyDefaults != nil { + return e.ProxyDefaults[partition] + } + return nil +} + // AddRouters adds router configs. Convenience function for testing. func (e *DiscoveryChainConfigEntries) AddRouters(entries ...*ServiceRouterConfigEntry) { if e.Routers == nil { @@ -1393,6 +1438,16 @@ func (e *DiscoveryChainConfigEntries) AddServices(entries ...*ServiceConfigEntry } } +// AddProxyDefaults adds proxy-defaults configs. Convenience function for testing. +func (e *DiscoveryChainConfigEntries) AddProxyDefaults(entries ...*ProxyConfigEntry) { + if e.ProxyDefaults == nil { + e.ProxyDefaults = make(map[string]*ProxyConfigEntry) + } + for _, entry := range entries { + e.ProxyDefaults[entry.PartitionOrDefault()] = entry + } +} + // AddEntries adds generic configs. Convenience function for testing. Panics on // operator error. 
func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { @@ -1410,7 +1465,7 @@ func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { if entry.GetName() != ProxyConfigGlobal { panic("the only supported proxy-defaults name is '" + ProxyConfigGlobal + "'") } - e.GlobalProxy = entry.(*ProxyConfigEntry) + e.AddProxyDefaults(entry.(*ProxyConfigEntry)) default: panic("unhandled config entry kind: " + entry.GetKind()) } @@ -1418,7 +1473,7 @@ func (e *DiscoveryChainConfigEntries) AddEntries(entries ...ConfigEntry) { } func (e *DiscoveryChainConfigEntries) IsEmpty() bool { - return e.IsChainEmpty() && len(e.Services) == 0 && e.GlobalProxy == nil + return e.IsChainEmpty() && len(e.Services) == 0 && len(e.ProxyDefaults) == 0 } func (e *DiscoveryChainConfigEntries) IsChainEmpty() bool { diff --git a/agent/structs/config_entry_discoverychain_test.go b/agent/structs/config_entry_discoverychain_test.go index a3fb49b4a..e2554977a 100644 --- a/agent/structs/config_entry_discoverychain_test.go +++ b/agent/structs/config_entry_discoverychain_test.go @@ -695,7 +695,7 @@ func TestServiceResolverConfigEntry(t *testing.T) { "v1": {}, }, }, - validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, or Datacenters is required`, + validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, Partition, or Datacenters is required`, }, { name: "failover to self using invalid subset", diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index ce507845a..b5978ad29 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -513,6 +513,14 @@ function run_container_s2-secondary { common_run_container_service s2-secondary secondary 8181 8179 } +function run_container_s2-ap1 { + common_run_container_service s2 ap1 8480 8479 +} + +function run_container_s3-ap1 { + common_run_container_service s3 ap1 8580 8579 +} + function common_run_container_sidecar_proxy { local service="$1" local CLUSTER="$2" @@ -581,6 +589,14 @@ function run_container_s2-sidecar-proxy-secondary { common_run_container_sidecar_proxy s2 secondary } +function run_container_s2-ap1-sidecar-proxy { + common_run_container_sidecar_proxy s2 ap1 +} + +function run_container_s3-ap1-sidecar-proxy { + common_run_container_sidecar_proxy s3 ap1 +} + function common_run_container_gateway { local name="$1" local DC="$2" From 65875a7c69069af42709d783a6a460e839127ef2 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 6 Dec 2021 11:31:05 -0700 Subject: [PATCH 43/60] Remove support for failover to partition Failing over to a partition is more similar to failing over to another datacenter than it is to failing over to a namespace. In a future release we should update how localities for failover are specified. We should be able to accept a list of localities which can include both partition and datacenter.
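For illustration only, a minimal sketch (not part of this patch) of how a resolver failover entry is expected to validate once the partition target is gone, assuming the agent/structs API shown elsewhere in this series; the import path, service name, subset key, and datacenter names below are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/structs"
)

func main() {
	// Catch-all ("*") failover that widens the group by datacenter only.
	// ServiceResolverFailover no longer carries a Partition field, so a
	// cross-partition failover target can no longer be expressed here.
	entry := &structs.ServiceResolverConfigEntry{
		Kind: "service-resolver",
		Name: "main",
		Failover: map[string]structs.ServiceResolverFailover{
			"*": {
				Datacenters: []string{"dc2", "dc3"},
			},
		},
	}

	// Expected to pass validation: Datacenters satisfies the "one of
	// Service, ServiceSubset, Namespace, or Datacenters is required" check.
	fmt.Println("validate:", entry.Validate())
}
```

Widening a failover group across partitions would instead be expressed later through the planned list of localities, which is why the field is removed here rather than reworked.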
--- agent/consul/discoverychain/compile.go | 4 ++-- agent/structs/config_entry_discoverychain.go | 18 ++++-------------- .../config_entry_discoverychain_test.go | 2 +- 3 files changed, 7 insertions(+), 17 deletions(-) diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index c795ef88c..12de7f45d 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -989,7 +989,7 @@ RESOLVE_AGAIN: target, failover.Service, failover.ServiceSubset, - failover.Partition, + target.Partition, failover.Namespace, dc, ) @@ -1003,7 +1003,7 @@ RESOLVE_AGAIN: target, failover.Service, failover.ServiceSubset, - failover.Partition, + target.Partition, failover.Namespace, "", ) diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 37b204057..27980a4d1 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -946,7 +946,7 @@ func (e *ServiceResolverConfigEntry) Validate() error { if e.Redirect != nil { if !e.InDefaultPartition() && e.Redirect.Datacenter != "" { - return fmt.Errorf("Cross-datacenter redirect is not supported in non-default partitions") + return fmt.Errorf("Cross-datacenter redirect is only supported in the default partition") } if PartitionOrDefault(e.Redirect.Partition) != e.PartitionOrDefault() && e.Redirect.Datacenter != "" { return fmt.Errorf("Cross-datacenter and cross-partition redirect is not supported") @@ -985,18 +985,15 @@ func (e *ServiceResolverConfigEntry) Validate() error { for subset, f := range e.Failover { if !e.InDefaultPartition() && len(f.Datacenters) != 0 { - return fmt.Errorf("Cross-datacenter failover is not supported in non-default partitions") - } - if PartitionOrDefault(f.Partition) != e.PartitionOrDefault() && len(f.Datacenters) != 0 { - return fmt.Errorf("Cross-datacenter and cross-partition failover is not supported") + return fmt.Errorf("Cross-datacenter failover is only supported in the default partition") } if subset != "*" && !isSubset(subset) { return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset) } - if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && f.Partition == "" && len(f.Datacenters) == 0 { - return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, Partition, or Datacenters is required", subset) + if f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 { + return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, Namespace, or Datacenters is required", subset) } if f.ServiceSubset != "" { @@ -1202,13 +1199,6 @@ type ServiceResolverFailover struct { // This is a DESTINATION during failover. Namespace string `json:",omitempty"` - // Partition is the partition to resolve the requested service from to form - // the failover group of instances. If empty the current partition is used - // (optional). - // - // This is a DESTINATION during failover. - Partition string `json:",omitempty"` - // Datacenters is a fixed list of datacenters to try. We never try a // datacenter multiple times, so those are subtracted from this list before // proceeding. 
diff --git a/agent/structs/config_entry_discoverychain_test.go b/agent/structs/config_entry_discoverychain_test.go index e2554977a..a3fb49b4a 100644 --- a/agent/structs/config_entry_discoverychain_test.go +++ b/agent/structs/config_entry_discoverychain_test.go @@ -695,7 +695,7 @@ func TestServiceResolverConfigEntry(t *testing.T) { "v1": {}, }, }, - validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, Partition, or Datacenters is required`, + validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, Namespace, or Datacenters is required`, }, { name: "failover to self using invalid subset", From 7c326d2a0c7dcb6cc0912008d3315d082185f521 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 6 Dec 2021 11:58:33 -0700 Subject: [PATCH 44/60] Update api module and decoding tests --- api/config_entry_discoverychain.go | 20 ++++---- command/config/write/config_write_test.go | 58 +++++++++++++++++------ 2 files changed, 54 insertions(+), 24 deletions(-) diff --git a/api/config_entry_discoverychain.go b/api/config_entry_discoverychain.go index b9c599485..dfb2bcc10 100644 --- a/api/config_entry_discoverychain.go +++ b/api/config_entry_discoverychain.go @@ -63,10 +63,10 @@ type ServiceRouteHTTPMatchQueryParam struct { } type ServiceRouteDestination struct { - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` NumRetries uint32 `json:",omitempty" alias:"num_retries"` @@ -134,11 +134,11 @@ func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.Crea func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } type ServiceSplit struct { - Weight float32 - Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. + Weight float32 + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` RequestHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"request_headers"` ResponseHeaders *HTTPHeaderModifiers `json:",omitempty" alias:"response_headers"` } @@ -216,9 +216,9 @@ type ServiceResolverSubset struct { type ServiceResolverRedirect struct { Service string `json:",omitempty"` ServiceSubset string `json:",omitempty" alias:"service_subset"` - // Referencing other partitions is not supported. 
- Namespace string `json:",omitempty"` - Datacenter string `json:",omitempty"` + Namespace string `json:",omitempty"` + Partition string `json:",omitempty"` + Datacenter string `json:",omitempty"` } type ServiceResolverFailover struct { diff --git a/command/config/write/config_write_test.go b/command/config/write/config_write_test.go index fc80e3b98..0ff5152c9 100644 --- a/command/config/write/config_write_test.go +++ b/command/config/write/config_write_test.go @@ -791,6 +791,7 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-router" name = "main" + partition = "pepper" meta { "foo" = "bar" "gir" = "zim" @@ -830,12 +831,13 @@ func TestParseConfigEntry(t *testing.T) { } } destination { - service = "carrot" - service_subset = "kale" - namespace = "leek" - prefix_rewrite = "/alternate" - request_timeout = "99s" - num_retries = 12345 + service = "carrot" + service_subset = "kale" + namespace = "leek" + partition = "chard" + prefix_rewrite = "/alternate" + request_timeout = "99s" + num_retries = 12345 retry_on_connect_failure = true retry_on_status_codes = [401, 209] } @@ -874,6 +876,7 @@ func TestParseConfigEntry(t *testing.T) { camel: ` Kind = "service-router" Name = "main" + Partition = "pepper" Meta { "foo" = "bar" "gir" = "zim" @@ -916,6 +919,7 @@ func TestParseConfigEntry(t *testing.T) { Service = "carrot" ServiceSubset = "kale" Namespace = "leek" + Partition = "chard" PrefixRewrite = "/alternate" RequestTimeout = "99s" NumRetries = 12345 @@ -958,6 +962,7 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-router", "name": "main", + "partition": "pepper", "meta" : { "foo": "bar", "gir": "zim" @@ -1000,6 +1005,7 @@ func TestParseConfigEntry(t *testing.T) { "service": "carrot", "service_subset": "kale", "namespace": "leek", + "partition": "chard", "prefix_rewrite": "/alternate", "request_timeout": "99s", "num_retries": 12345, @@ -1049,6 +1055,7 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-router", "Name": "main", + "Partition": "pepper", "Meta" : { "foo": "bar", "gir": "zim" @@ -1091,6 +1098,7 @@ func TestParseConfigEntry(t *testing.T) { "Service": "carrot", "ServiceSubset": "kale", "Namespace": "leek", + "Partition": "chard", "PrefixRewrite": "/alternate", "RequestTimeout": "99s", "NumRetries": 12345, @@ -1137,8 +1145,9 @@ func TestParseConfigEntry(t *testing.T) { } `, expect: &api.ServiceRouterConfigEntry{ - Kind: "service-router", - Name: "main", + Kind: "service-router", + Name: "main", + Partition: "pepper", Meta: map[string]string{ "foo": "bar", "gir": "zim", @@ -1181,6 +1190,7 @@ func TestParseConfigEntry(t *testing.T) { Service: "carrot", ServiceSubset: "kale", Namespace: "leek", + Partition: "chard", PrefixRewrite: "/alternate", RequestTimeout: 99 * time.Second, NumRetries: 12345, @@ -1225,6 +1235,7 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-splitter" name = "main" + partition = "east" meta { "foo" = "bar" "gir" = "zim" @@ -1242,12 +1253,14 @@ func TestParseConfigEntry(t *testing.T) { weight = 0.9 service = "other" namespace = "alt" + partition = "west" }, ] `, camel: ` Kind = "service-splitter" Name = "main" + Partition = "east" Meta { "foo" = "bar" "gir" = "zim" @@ -1265,6 +1278,7 @@ func TestParseConfigEntry(t *testing.T) { Weight = 0.9 Service = "other" Namespace = "alt" + Partition = "west" }, ] `, @@ -1272,6 +1286,7 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-splitter", "name": "main", + "partition": "east", "meta" : { "foo": "bar", "gir": "zim" @@ -1288,7 +1303,8 @@ func 
TestParseConfigEntry(t *testing.T) { { "weight": 0.9, "service": "other", - "namespace": "alt" + "namespace": "alt", + "partition": "west" } ] } @@ -1297,6 +1313,7 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-splitter", "Name": "main", + "Partition": "east", "Meta" : { "foo": "bar", "gir": "zim" @@ -1313,14 +1330,16 @@ func TestParseConfigEntry(t *testing.T) { { "Weight": 0.9, "Service": "other", - "Namespace": "alt" + "Namespace": "alt", + "Partition": "west" } ] } `, expect: &api.ServiceSplitterConfigEntry{ - Kind: api.ServiceSplitter, - Name: "main", + Kind: api.ServiceSplitter, + Name: "main", + Partition: "east", Meta: map[string]string{ "foo": "bar", "gir": "zim", @@ -1338,6 +1357,7 @@ func TestParseConfigEntry(t *testing.T) { Weight: 0.9, Service: "other", Namespace: "alt", + Partition: "west", }, }, }, @@ -1512,20 +1532,24 @@ func TestParseConfigEntry(t *testing.T) { snake: ` kind = "service-resolver" name = "main" + partition = "east" redirect { service = "other" service_subset = "backup" namespace = "alt" + partition = "west" datacenter = "dc9" } `, camel: ` Kind = "service-resolver" Name = "main" + Partition = "east" Redirect { Service = "other" ServiceSubset = "backup" Namespace = "alt" + Partition = "west" Datacenter = "dc9" } `, @@ -1533,10 +1557,12 @@ func TestParseConfigEntry(t *testing.T) { { "kind": "service-resolver", "name": "main", + "partition": "east", "redirect": { "service": "other", "service_subset": "backup", "namespace": "alt", + "partition": "west", "datacenter": "dc9" } } @@ -1545,21 +1571,25 @@ func TestParseConfigEntry(t *testing.T) { { "Kind": "service-resolver", "Name": "main", + "Partition": "east", "Redirect": { "Service": "other", "ServiceSubset": "backup", "Namespace": "alt", + "Partition": "west", "Datacenter": "dc9" } } `, expect: &api.ServiceResolverConfigEntry{ - Kind: "service-resolver", - Name: "main", + Kind: "service-resolver", + Name: "main", + Partition: "east", Redirect: &api.ServiceResolverRedirect{ Service: "other", ServiceSubset: "backup", Namespace: "alt", + Partition: "west", Datacenter: "dc9", }, }, From 21ce4d2fb7469d48ff0813b3b448bdbba47955e1 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 6 Dec 2021 12:35:11 -0700 Subject: [PATCH 45/60] Add changelog entry --- .changelog/11757.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11757.txt diff --git a/.changelog/11757.txt b/.changelog/11757.txt new file mode 100644 index 000000000..897fa1fcc --- /dev/null +++ b/.changelog/11757.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: **(Enterprise only)** add support for targeting partitions in discovery chain routes, splits, and redirects. +``` \ No newline at end of file From aa896fd444d46d8915c680da07edaa5a6333714d Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 6 Dec 2021 14:45:44 -0600 Subject: [PATCH 46/60] fix test failures --- api/config_entry_discoverychain_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/api/config_entry_discoverychain_test.go b/api/config_entry_discoverychain_test.go index 357f1ff5c..b56372a26 100644 --- a/api/config_entry_discoverychain_test.go +++ b/api/config_entry_discoverychain_test.go @@ -242,6 +242,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { Service: "test-failover", ServiceSubset: "v2", Namespace: defaultNamespace, + Partition: defaultPartition, PrefixRewrite: "/", RequestTimeout: 5 * time.Second, NumRetries: 5, From c11b59f3a24c8dc3e75db6aa664860014cd71dd1 Mon Sep 17 00:00:00 2001 From: "R.B. 
Boyer" <4903+rboyer@users.noreply.github.com> Date: Mon, 6 Dec 2021 14:59:35 -0600 Subject: [PATCH 47/60] sync back 1.11.0-beta3 changelogs (#11759) --- CHANGELOG.md | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fa24d3ef..5032e6b73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,60 @@ -## UNRELEASED +## 1.11.0-beta3 (November 17, 2021) + +SECURITY: + +* agent: Use SHA256 instead of MD5 to generate persistence file names. [[GH-11491](https://github.com/hashicorp/consul/issues/11491)] +* namespaces: **(Enterprise only)** Creating or editing namespaces that include default ACL policies or ACL roles now requires `acl:write` permission in the default namespace. This change fixes CVE-2021-41805. + +FEATURES: + +* ca: Add a configurable TTL for Connect CA root certificates. The configuration is supported by the Vault and Consul providers. [[GH-11428](https://github.com/hashicorp/consul/issues/11428)] +* ca: Add a configurable TTL to the AWS ACM Private CA provider root certificate. [[GH-11449](https://github.com/hashicorp/consul/issues/11449)] +* health-checks: add support for h2c in http2 ping health checks [[GH-10690](https://github.com/hashicorp/consul/issues/10690)] +* partitions: **(Enterprise only)** segment serf LAN gossip between nodes in different partitions +* ui: Adding support of Consul API Gateway as an external source. [[GH-11371](https://github.com/hashicorp/consul/issues/11371)] +* ui: Topology - New views for scenarios where no dependencies exist or ACLs are disabled [[GH-11280](https://github.com/hashicorp/consul/issues/11280)] + +IMPROVEMENTS: + +* ci: Artifact builds will now only run on merges to the release branches or to `main` [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packages are now available for all supported Linux architectures including arm, arm64, 386, and amd64 [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* ci: The Linux packaging service configs and pre/post install scripts are now available under [.release/linux] [[GH-11417](https://github.com/hashicorp/consul/issues/11417)] +* config: warn the user if client_addr is empty because client services won't be listening [[GH-11461](https://github.com/hashicorp/consul/issues/11461)] +* connect/ca: Return an error when querying roots from uninitialized CA. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* connect: **(Enterprise only)** Allow ingress gateways to target services in another partition [[GH-11566](https://github.com/hashicorp/consul/issues/11566)] +* connect: add Namespace configuration setting for Vault CA provider [[GH-11477](https://github.com/hashicorp/consul/issues/11477)] +* namespaces: **(Enterprise only)** policy and role defaults can reference policies in any namespace in the same partition by ID +* partitions: Prevent writing partition-exports entries to secondary DCs. [[GH-11541](https://github.com/hashicorp/consul/issues/11541)] +* sdk: Add support for iptable rules that allow DNS lookup redirection to Consul DNS. 
[[GH-11480](https://github.com/hashicorp/consul/issues/11480)] +* segments: **(Enterprise only)** ensure that the serf_lan_allowed_cidrs applies to network segments [[GH-11495](https://github.com/hashicorp/consul/issues/11495)] +* ui: Add upstream icons for upstreams and upstream instances [[GH-11556](https://github.com/hashicorp/consul/issues/11556)] +* ui: Update UI browser support to 'roughly ~2 years back' [[GH-11505](https://github.com/hashicorp/consul/issues/11505)] +* ui: When switching partitions reset the namespace back to the tokens default namespace or default [[GH-11479](https://github.com/hashicorp/consul/issues/11479)] +* ui: added copy to clipboard button in code editor toolbars [[GH-11474](https://github.com/hashicorp/consul/issues/11474)] + +BUG FIXES: + +* acl: **(Enterprise only)** fix namespace and namespace_prefix policy evaluation when both govern an authz request +* api: ensure new partition fields are omit empty for compatibility with older versions of consul [[GH-11585](https://github.com/hashicorp/consul/issues/11585)] +* connect/ca: Allow secondary initialization to resume after being deferred due to unreachable or incompatible primary DC servers. [[GH-11514](https://github.com/hashicorp/consul/issues/11514)] +* connect: fix issue with attempting to generate an invalid upstream cluster from UpstreamConfig.Defaults. [[GH-11245](https://github.com/hashicorp/consul/issues/11245)] +* macos: fixes building with a non-Apple LLVM (such as installed via Homebrew) [[GH-11586](https://github.com/hashicorp/consul/issues/11586)] +* namespaces: **(Enterprise only)** ensure the namespace replicator doesn't replicate deleted namespaces +* partitions: **(Enterprise only)** fix panic when forwarding delete operations to the leader +* snapshot: **(Enterprise only)** fixed a bug where the snapshot agent would ignore the `license_path` setting in config files +* snapshot: **(Enterprise only)** snapshot agent no longer attempts to refresh its license from the server when a local license is provided (i.e. 
via config or an environment variable) +* state: **(Enterprise Only)** ensure partition delete triggers namespace deletes +* ui: **(Enterprise only)** When no namespace is selected, make sure to default to the tokens default namespace when requesting permissions [[GH-11472](https://github.com/hashicorp/consul/issues/11472)] +* ui: Ensure the UI stores the default partition for the users token [[GH-11591](https://github.com/hashicorp/consul/issues/11591)] +* ui: Ensure we check intention permissions for specific services when deciding +whether to show action buttons for per service intention actions [[GH-11409](https://github.com/hashicorp/consul/issues/11409)] +* ui: Filter the global intentions list by the currently selected partition rather +than a wildcard [[GH-11475](https://github.com/hashicorp/consul/issues/11475)] +* ui: Revert to depending on the backend, 'post-user-action', to report +permissions errors rather than using UI capabilities 'pre-user-action' [[GH-11520](https://github.com/hashicorp/consul/issues/11520)] +* ui: code editor styling (layout consistency + wide screen support) [[GH-11474](https://github.com/hashicorp/consul/issues/11474)] +* windows: fixes arm and arm64 builds [[GH-11586](https://github.com/hashicorp/consul/issues/11586)] +* xds: fixes a bug where replacing a mesh gateway node used for WAN federation (with another that has a different IP) could leave gateways in the other DC unable to re-establish the connection [[GH-11522](https://github.com/hashicorp/consul/issues/11522)] ## 1.11.0-beta2 (November 02, 2021) From 89e90d1ffcc1525b923f1159111611d44cc3b3aa Mon Sep 17 00:00:00 2001 From: "R.B. Boyer" Date: Mon, 6 Dec 2021 15:36:52 -0600 Subject: [PATCH 48/60] return the max --- agent/consul/state/config_entry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index 434e776d4..29e4e7aa5 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -1040,7 +1040,7 @@ func readDiscoveryChainConfigEntriesTxn( } } - return idx, res, nil + return maxIdx, res, nil } // anyKey returns any key from the provided map if any exist.
Useful for using From 0230ebb4ef905e3e4302d092d83eeb97c18c7e22 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 7 Dec 2021 12:12:47 +0000 Subject: [PATCH 49/60] agent/token: rename `agent_master` to `agent_recovery` (internally) (#11744) --- .changelog/11744.txt | 3 + agent/agent_endpoint.go | 2 +- agent/agent_endpoint_test.go | 6 +- agent/agent_test.go | 2 +- agent/config/builder.go | 12 +- agent/config/deprecated_test.go | 6 +- agent/config/runtime_test.go | 12 +- .../TestRuntimeConfig_Sanitize.golden | 4 +- agent/consul/acl.go | 2 +- agent/consul/acl_test.go | 2 +- agent/token/persistence.go | 54 ++++--- agent/token/persistence_test.go | 132 ++++++++++-------- agent/token/store.go | 36 ++--- agent/token/store_test.go | 88 ++++++------ 14 files changed, 194 insertions(+), 167 deletions(-) create mode 100644 .changelog/11744.txt diff --git a/.changelog/11744.txt b/.changelog/11744.txt new file mode 100644 index 000000000..2a9d3cafd --- /dev/null +++ b/.changelog/11744.txt @@ -0,0 +1,3 @@ +```release-note:note +Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled) +``` diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 6bb45de3b..9646f7f28 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1510,7 +1510,7 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) ( } case "acl_agent_master_token", "agent_master", "agent_recovery": - s.agent.tokens.UpdateAgentMasterToken(args.Token, token_store.TokenSourceAPI) + s.agent.tokens.UpdateAgentRecoveryToken(args.Token, token_store.TokenSourceAPI) case "acl_replication_token", "replication": s.agent.tokens.UpdateReplicationToken(args.Token, token_store.TokenSourceAPI) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index ed9d7b9f4..b699c9666 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -5396,7 +5396,7 @@ func TestAgent_Token(t *testing.T) { resetTokens := func(init tokens) { a.tokens.UpdateUserToken(init.user, init.userSource) a.tokens.UpdateAgentToken(init.agent, init.agentSource) - a.tokens.UpdateAgentMasterToken(init.master, init.masterSource) + a.tokens.UpdateAgentRecoveryToken(init.master, init.masterSource) a.tokens.UpdateReplicationToken(init.repl, init.replSource) } @@ -5614,7 +5614,7 @@ func TestAgent_Token(t *testing.T) { } require.Equal(t, tt.effective.user, a.tokens.UserToken()) require.Equal(t, tt.effective.agent, a.tokens.AgentToken()) - require.Equal(t, tt.effective.master, a.tokens.AgentMasterToken()) + require.Equal(t, tt.effective.master, a.tokens.AgentRecoveryToken()) require.Equal(t, tt.effective.repl, a.tokens.ReplicationToken()) tok, src := a.tokens.UserTokenAndSource() @@ -5625,7 +5625,7 @@ func TestAgent_Token(t *testing.T) { require.Equal(t, tt.raw.agent, tok) require.Equal(t, tt.raw.agentSource, src) - tok, src = a.tokens.AgentMasterTokenAndSource() + tok, src = a.tokens.AgentRecoveryTokenAndSource() require.Equal(t, tt.raw.master, tok) require.Equal(t, tt.raw.masterSource, src) diff --git a/agent/agent_test.go b/agent/agent_test.go index 68ee41559..7e6852797 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -226,7 +226,7 @@ func TestAgent_TokenStore(t *testing.T) { if got, want := a.tokens.AgentToken(), "agent"; got != want { t.Fatalf("got %q want %q", got, want) } - if got, want := a.tokens.IsAgentMasterToken("master"), true; got != want { + if got, want := 
a.tokens.IsAgentRecoveryToken("master"), true; got != want { t.Fatalf("got %v want %v", got, want) } } diff --git a/agent/config/builder.go b/agent/config/builder.go index 55c938212..1cc3192db 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -866,12 +866,12 @@ func (b *builder) build() (rt RuntimeConfig, err error) { ACLTokenReplication: boolVal(c.ACL.TokenReplication), ACLTokens: token.Config{ - DataDir: dataDir, - EnablePersistence: boolValWithDefault(c.ACL.EnableTokenPersistence, false), - ACLDefaultToken: stringVal(c.ACL.Tokens.Default), - ACLAgentToken: stringVal(c.ACL.Tokens.Agent), - ACLAgentMasterToken: stringVal(c.ACL.Tokens.AgentRecovery), - ACLReplicationToken: stringVal(c.ACL.Tokens.Replication), + DataDir: dataDir, + EnablePersistence: boolValWithDefault(c.ACL.EnableTokenPersistence, false), + ACLDefaultToken: stringVal(c.ACL.Tokens.Default), + ACLAgentToken: stringVal(c.ACL.Tokens.Agent), + ACLAgentRecoveryToken: stringVal(c.ACL.Tokens.AgentRecovery), + ACLReplicationToken: stringVal(c.ACL.Tokens.Replication), }, // Autopilot diff --git a/agent/config/deprecated_test.go b/agent/config/deprecated_test.go index 4cdcfb10d..3ded0ff81 100644 --- a/agent/config/deprecated_test.go +++ b/agent/config/deprecated_test.go @@ -111,7 +111,7 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { rt := result.RuntimeConfig require.Equal("token1", rt.ACLMasterToken) - require.Equal("token2", rt.ACLTokens.ACLAgentMasterToken) + require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) }) t.Run("embedded in tokens struct", func(t *testing.T) { @@ -142,7 +142,7 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { rt := result.RuntimeConfig require.Equal("token1", rt.ACLMasterToken) - require.Equal("token2", rt.ACLTokens.ACLAgentMasterToken) + require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) }) t.Run("both", func(t *testing.T) { @@ -170,6 +170,6 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { rt := result.RuntimeConfig require.Equal("token3", rt.ACLMasterToken) - require.Equal("token4", rt.ACLTokens.ACLAgentMasterToken) + require.Equal("token4", rt.ACLTokens.ACLAgentRecoveryToken) }) } diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index e92fb9855..82192632a 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5341,12 +5341,12 @@ func TestLoad_FullConfig(t *testing.T) { // user configurable values ACLTokens: token.Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "418fdff1", - ACLAgentToken: "bed2377c", - ACLAgentMasterToken: "1dba6aba", - ACLReplicationToken: "5795983a", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "418fdff1", + ACLAgentToken: "bed2377c", + ACLAgentRecoveryToken: "1dba6aba", + ACLReplicationToken: "5795983a", }, ACLsEnabled: true, diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 84c303c76..083e7d853 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -14,7 +14,7 @@ }, "ACLTokenReplication": false, "ACLTokens": { - "ACLAgentMasterToken": "hidden", + "ACLAgentRecoveryToken": "hidden", "ACLAgentToken": "hidden", "ACLDefaultToken": "hidden", "ACLReplicationToken": "hidden", @@ -424,4 +424,4 @@ "Version": "", "VersionPrerelease": "", "Watches": [] -} +} \ No newline at end of file diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 
db7d415a1..9a2c74a17 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1053,7 +1053,7 @@ func (r *ACLResolver) resolveLocallyManagedToken(token string) (structs.ACLIdent return nil, nil, false } - if r.tokens.IsAgentMasterToken(token) { + if r.tokens.IsAgentRecoveryToken(token) { return structs.NewAgentMasterTokenIdentity(r.config.NodeName, token), r.agentMasterAuthz, true } diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 8d159cbad..03707c0cb 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -4020,7 +4020,7 @@ func TestACLResolver_AgentMaster(t *testing.T) { cfg.DisableDuration = 0 }) - tokens.UpdateAgentMasterToken("9a184a11-5599-459e-b71a-550e5f9a5a23", token.TokenSourceConfig) + tokens.UpdateAgentRecoveryToken("9a184a11-5599-459e-b71a-550e5f9a5a23", token.TokenSourceConfig) ident, authz, err := r.ResolveTokenToIdentityAndAuthorizer("9a184a11-5599-459e-b71a-550e5f9a5a23") require.NoError(t, err) diff --git a/agent/token/persistence.go b/agent/token/persistence.go index c36b90364..c78e22891 100644 --- a/agent/token/persistence.go +++ b/agent/token/persistence.go @@ -17,12 +17,12 @@ type Logger interface { // Config used by Store.Load, which includes tokens and settings for persistence. type Config struct { - EnablePersistence bool - DataDir string - ACLDefaultToken string - ACLAgentToken string - ACLAgentMasterToken string - ACLReplicationToken string + EnablePersistence bool + DataDir string + ACLDefaultToken string + ACLAgentToken string + ACLAgentRecoveryToken string + ACLReplicationToken string EnterpriseConfig } @@ -69,10 +69,10 @@ func (t *Store) WithPersistenceLock(f func() error) error { } type persistedTokens struct { - Replication string `json:"replication,omitempty"` - AgentMaster string `json:"agent_master,omitempty"` - Default string `json:"default,omitempty"` - Agent string `json:"agent,omitempty"` + Replication string `json:"replication,omitempty"` + AgentRecovery string `json:"agent_recovery,omitempty"` + Default string `json:"default,omitempty"` + Agent string `json:"agent,omitempty"` } type fileStore struct { @@ -110,14 +110,14 @@ func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { s.UpdateAgentToken(cfg.ACLAgentToken, TokenSourceConfig) } - if tokens.AgentMaster != "" { - s.UpdateAgentMasterToken(tokens.AgentMaster, TokenSourceAPI) + if tokens.AgentRecovery != "" { + s.UpdateAgentRecoveryToken(tokens.AgentRecovery, TokenSourceAPI) - if cfg.ACLAgentMasterToken != "" { - logger.Warn("\"agent_master\" token present in both the configuration and persisted token store, using the persisted token") + if cfg.ACLAgentRecoveryToken != "" { + logger.Warn("\"agent_recovery\" token present in both the configuration and persisted token store, using the persisted token") } } else { - s.UpdateAgentMasterToken(cfg.ACLAgentMasterToken, TokenSourceConfig) + s.UpdateAgentRecoveryToken(cfg.ACLAgentRecoveryToken, TokenSourceConfig) } if tokens.Replication != "" { @@ -134,22 +134,32 @@ func loadTokens(s *Store, cfg Config, tokens persistedTokens, logger Logger) { } func readPersistedFromFile(filename string) (persistedTokens, error) { - tokens := persistedTokens{} + var tokens struct { + persistedTokens + + // Support reading tokens persisted by versions <1.11, where agent_master was + // renamed to agent_recovery. 
+ LegacyAgentMaster string `json:"agent_master"` + } buf, err := ioutil.ReadFile(filename) switch { case os.IsNotExist(err): // non-existence is not an error we care about - return tokens, nil + return tokens.persistedTokens, nil case err != nil: - return tokens, fmt.Errorf("failed reading tokens file %q: %w", filename, err) + return tokens.persistedTokens, fmt.Errorf("failed reading tokens file %q: %w", filename, err) } if err := json.Unmarshal(buf, &tokens); err != nil { - return tokens, fmt.Errorf("failed to decode tokens file %q: %w", filename, err) + return tokens.persistedTokens, fmt.Errorf("failed to decode tokens file %q: %w", filename, err) } - return tokens, nil + if tokens.AgentRecovery == "" { + tokens.AgentRecovery = tokens.LegacyAgentMaster + } + + return tokens.persistedTokens, nil } func (p *fileStore) withPersistenceLock(s *Store, f func() error) error { @@ -170,8 +180,8 @@ func (p *fileStore) saveToFile(s *Store) error { tokens.Agent = tok } - if tok, source := s.AgentMasterTokenAndSource(); tok != "" && source == TokenSourceAPI { - tokens.AgentMaster = tok + if tok, source := s.AgentRecoveryTokenAndSource(); tok != "" && source == TokenSourceAPI { + tokens.AgentRecovery = tok } if tok, source := s.ReplicationTokenAndSource(); tok != "" && source == TokenSourceAPI { diff --git a/agent/token/persistence_test.go b/agent/token/persistence_test.go index ec8e7e60e..1bfe971fd 100644 --- a/agent/token/persistence_test.go +++ b/agent/token/persistence_test.go @@ -18,47 +18,47 @@ func TestStore_Load(t *testing.T) { t.Run("with empty store", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLAgentToken: "alfa", - ACLAgentMasterToken: "bravo", - ACLDefaultToken: "charlie", - ACLReplicationToken: "delta", + DataDir: dataDir, + ACLAgentToken: "alfa", + ACLAgentRecoveryToken: "bravo", + ACLDefaultToken: "charlie", + ACLReplicationToken: "delta", } require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "alfa", store.AgentToken()) - require.Equal(t, "bravo", store.AgentMasterToken()) + require.Equal(t, "bravo", store.AgentRecoveryToken()) require.Equal(t, "charlie", store.UserToken()) require.Equal(t, "delta", store.ReplicationToken()) }) t.Run("updated from Config", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLDefaultToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", } // ensures no error for missing persisted tokens file require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "echo", store.UserToken()) require.Equal(t, "foxtrot", store.AgentToken()) - require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) require.Equal(t, "hotel", store.ReplicationToken()) }) t.Run("with persisted tokens", func(t *testing.T) { cfg := Config{ - DataDir: dataDir, - ACLDefaultToken: "echo", - ACLAgentToken: "foxtrot", - ACLAgentMasterToken: "golf", - ACLReplicationToken: "hotel", + DataDir: dataDir, + ACLDefaultToken: "echo", + ACLAgentToken: "foxtrot", + ACLAgentRecoveryToken: "golf", + ACLReplicationToken: "hotel", } tokens := `{ "agent" : "india", - "agent_master" : "juliett", + "agent_recovery" : "juliett", "default": "kilo", "replication" : "lima" }` @@ -69,14 +69,14 @@ func TestStore_Load(t *testing.T) { // no updates since token persistence is not enabled require.Equal(t, "echo", store.UserToken()) require.Equal(t, 
"foxtrot", store.AgentToken()) - require.Equal(t, "golf", store.AgentMasterToken()) + require.Equal(t, "golf", store.AgentRecoveryToken()) require.Equal(t, "hotel", store.ReplicationToken()) cfg.EnablePersistence = true require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "india", store.AgentToken()) - require.Equal(t, "juliett", store.AgentMasterToken()) + require.Equal(t, "juliett", store.AgentRecoveryToken()) require.Equal(t, "kilo", store.UserToken()) require.Equal(t, "lima", store.ReplicationToken()) @@ -84,28 +84,42 @@ func TestStore_Load(t *testing.T) { require.NotNil(t, store.persistence) }) + t.Run("persisted tokens include pre-1.11 agent_master naming", func(t *testing.T) { + cfg := Config{ + EnablePersistence: true, + DataDir: dataDir, + ACLAgentRecoveryToken: "golf", + } + + tokens := `{"agent_master": "juliett"}` + require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) + require.NoError(t, store.Load(cfg, logger)) + + require.Equal(t, "juliett", store.AgentRecoveryToken()) + }) + t.Run("with persisted tokens, persisted tokens override config", func(t *testing.T) { tokens := `{ "agent" : "mike", - "agent_master" : "november", + "agent_recovery" : "november", "default": "oscar", "replication" : "papa" }` cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "quebec", - ACLAgentToken: "romeo", - ACLAgentMasterToken: "sierra", - ACLReplicationToken: "tango", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "quebec", + ACLAgentToken: "romeo", + ACLAgentRecoveryToken: "sierra", + ACLReplicationToken: "tango", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "mike", store.AgentToken()) - require.Equal(t, "november", store.AgentMasterToken()) + require.Equal(t, "november", store.AgentRecoveryToken()) require.Equal(t, "oscar", store.UserToken()) require.Equal(t, "papa", store.ReplicationToken()) }) @@ -113,35 +127,35 @@ func TestStore_Load(t *testing.T) { t.Run("with some persisted tokens", func(t *testing.T) { tokens := `{ "agent" : "uniform", - "agent_master" : "victor" + "agent_recovery" : "victor" }` cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "whiskey", - ACLAgentToken: "xray", - ACLAgentMasterToken: "yankee", - ACLReplicationToken: "zulu", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "whiskey", + ACLAgentToken: "xray", + ACLAgentRecoveryToken: "yankee", + ACLReplicationToken: "zulu", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte(tokens), 0600)) require.NoError(t, store.Load(cfg, logger)) require.Equal(t, "uniform", store.AgentToken()) - require.Equal(t, "victor", store.AgentMasterToken()) + require.Equal(t, "victor", store.AgentRecoveryToken()) require.Equal(t, "whiskey", store.UserToken()) require.Equal(t, "zulu", store.ReplicationToken()) }) t.Run("persisted file contains invalid data", func(t *testing.T) { cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "one", - ACLAgentToken: "two", - ACLAgentMasterToken: "three", - ACLReplicationToken: "four", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "one", + ACLAgentToken: "two", + ACLAgentRecoveryToken: "three", + ACLReplicationToken: "four", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, 0600)) @@ -151,18 +165,18 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "one", store.UserToken()) require.Equal(t, 
"two", store.AgentToken()) - require.Equal(t, "three", store.AgentMasterToken()) + require.Equal(t, "three", store.AgentRecoveryToken()) require.Equal(t, "four", store.ReplicationToken()) }) t.Run("persisted file contains invalid json", func(t *testing.T) { cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "alfa", - ACLAgentToken: "bravo", - ACLAgentMasterToken: "charlie", - ACLReplicationToken: "foxtrot", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "alfa", + ACLAgentToken: "bravo", + ACLAgentRecoveryToken: "charlie", + ACLReplicationToken: "foxtrot", } require.NoError(t, ioutil.WriteFile(tokenFile, []byte("[1,2,3]"), 0600)) @@ -172,7 +186,7 @@ func TestStore_Load(t *testing.T) { require.Equal(t, "alfa", store.UserToken()) require.Equal(t, "bravo", store.AgentToken()) - require.Equal(t, "charlie", store.AgentMasterToken()) + require.Equal(t, "charlie", store.AgentRecoveryToken()) require.Equal(t, "foxtrot", store.ReplicationToken()) }) } @@ -181,12 +195,12 @@ func TestStore_WithPersistenceLock(t *testing.T) { dataDir := testutil.TempDir(t, "datadir") store := new(Store) cfg := Config{ - EnablePersistence: true, - DataDir: dataDir, - ACLDefaultToken: "default-token", - ACLAgentToken: "agent-token", - ACLAgentMasterToken: "master-token", - ACLReplicationToken: "replication-token", + EnablePersistence: true, + DataDir: dataDir, + ACLDefaultToken: "default-token", + ACLAgentToken: "agent-token", + ACLAgentRecoveryToken: "recovery-token", + ACLReplicationToken: "replication-token", } err := store.Load(cfg, hclog.New(nil)) require.NoError(t, err) @@ -195,7 +209,7 @@ func TestStore_WithPersistenceLock(t *testing.T) { updated := store.UpdateUserToken("the-new-token", TokenSourceAPI) require.True(t, updated) - updated = store.UpdateAgentMasterToken("the-new-master-token", TokenSourceAPI) + updated = store.UpdateAgentRecoveryToken("the-new-recovery-token", TokenSourceAPI) require.True(t, updated) return nil } @@ -206,8 +220,8 @@ func TestStore_WithPersistenceLock(t *testing.T) { tokens, err := readPersistedFromFile(filepath.Join(dataDir, tokensPath)) require.NoError(t, err) expected := persistedTokens{ - Default: "the-new-token", - AgentMaster: "the-new-master-token", + Default: "the-new-token", + AgentRecovery: "the-new-recovery-token", } require.Equal(t, expected, tokens) } diff --git a/agent/token/store.go b/agent/token/store.go index 456190f70..ec2bac38f 100644 --- a/agent/token/store.go +++ b/agent/token/store.go @@ -17,7 +17,7 @@ type TokenKind int const ( TokenKindAgent TokenKind = iota - TokenKindAgentMaster + TokenKindAgentRecovery TokenKindUser TokenKindReplication ) @@ -59,13 +59,13 @@ type Store struct { // agentTokenSource indicates where this token originated from agentTokenSource TokenSource - // agentMasterToken is a special token that's only used locally for + // agentRecoveryToken is a special token that's only used locally for // access to the /v1/agent utility operations if the servers aren't // available. - agentMasterToken string + agentRecoveryToken string - // agentMasterTokenSource indicates where this token originated from - agentMasterTokenSource TokenSource + // agentRecoveryTokenSource indicates where this token originated from + agentRecoveryTokenSource TokenSource // replicationToken is a special token that's used by servers to // replicate data from the primary datacenter. 
@@ -188,15 +188,15 @@ func (t *Store) UpdateAgentToken(token string, source TokenSource) bool { return changed } -// UpdateAgentMasterToken replaces the current agent master token in the store. +// UpdateAgentRecoveryToken replaces the current agent recovery token in the store. // Returns true if it was changed. -func (t *Store) UpdateAgentMasterToken(token string, source TokenSource) bool { +func (t *Store) UpdateAgentRecoveryToken(token string, source TokenSource) bool { t.l.Lock() - changed := t.agentMasterToken != token || t.agentMasterTokenSource != source - t.agentMasterToken = token - t.agentMasterTokenSource = source + changed := t.agentRecoveryToken != token || t.agentRecoveryTokenSource != source + t.agentRecoveryToken = token + t.agentRecoveryTokenSource = source if changed { - t.sendNotificationLocked(TokenKindAgentMaster) + t.sendNotificationLocked(TokenKindAgentRecovery) } t.l.Unlock() return changed @@ -239,11 +239,11 @@ func (t *Store) AgentToken() string { return t.userToken } -func (t *Store) AgentMasterToken() string { +func (t *Store) AgentRecoveryToken() string { t.l.RLock() defer t.l.RUnlock() - return t.agentMasterToken + return t.agentRecoveryToken } // ReplicationToken returns the replication token. @@ -270,11 +270,11 @@ func (t *Store) AgentTokenAndSource() (string, TokenSource) { return t.agentToken, t.agentTokenSource } -func (t *Store) AgentMasterTokenAndSource() (string, TokenSource) { +func (t *Store) AgentRecoveryTokenAndSource() (string, TokenSource) { t.l.RLock() defer t.l.RUnlock() - return t.agentMasterToken, t.agentMasterTokenSource + return t.agentRecoveryToken, t.agentRecoveryTokenSource } // ReplicationToken returns the replication token. @@ -285,11 +285,11 @@ func (t *Store) ReplicationTokenAndSource() (string, TokenSource) { return t.replicationToken, t.replicationTokenSource } -// IsAgentMasterToken checks to see if a given token is the agent master token. +// IsAgentRecoveryToken checks to see if a given token is the agent recovery token. // This will never match an empty token for safety. 
-func (t *Store) IsAgentMasterToken(token string) bool { +func (t *Store) IsAgentRecoveryToken(token string) bool { t.l.RLock() defer t.l.RUnlock() - return (token != "") && (subtle.ConstantTimeCompare([]byte(token), []byte(t.agentMasterToken)) == 1) + return (token != "") && (subtle.ConstantTimeCompare([]byte(token), []byte(t.agentRecoveryToken)) == 1) } diff --git a/agent/token/store_test.go b/agent/token/store_test.go index 6df812257..06b44558d 100644 --- a/agent/token/store_test.go +++ b/agent/token/store_test.go @@ -8,14 +8,14 @@ import ( func TestStore_RegularTokens(t *testing.T) { type tokens struct { - userSource TokenSource - user string - agent string - agentSource TokenSource - master string - masterSource TokenSource - repl string - replSource TokenSource + userSource TokenSource + user string + agent string + agentSource TokenSource + recovery string + recoverySource TokenSource + repl string + replSource TokenSource } tests := []struct { @@ -67,22 +67,22 @@ func TestStore_RegularTokens(t *testing.T) { effective: tokens{repl: "R"}, }, { - name: "set master - config", - set: tokens{master: "M", masterSource: TokenSourceConfig}, - raw: tokens{master: "M", masterSource: TokenSourceConfig}, - effective: tokens{master: "M"}, + name: "set recovery - config", + set: tokens{recovery: "M", recoverySource: TokenSourceConfig}, + raw: tokens{recovery: "M", recoverySource: TokenSourceConfig}, + effective: tokens{recovery: "M"}, }, { - name: "set master - api", - set: tokens{master: "M", masterSource: TokenSourceAPI}, - raw: tokens{master: "M", masterSource: TokenSourceAPI}, - effective: tokens{master: "M"}, + name: "set recovery - api", + set: tokens{recovery: "M", recoverySource: TokenSourceAPI}, + raw: tokens{recovery: "M", recoverySource: TokenSourceAPI}, + effective: tokens{recovery: "M"}, }, { name: "set all", - set: tokens{user: "U", agent: "A", repl: "R", master: "M"}, - raw: tokens{user: "U", agent: "A", repl: "R", master: "M"}, - effective: tokens{user: "U", agent: "A", repl: "R", master: "M"}, + set: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, + raw: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, + effective: tokens{user: "U", agent: "A", repl: "R", recovery: "M"}, }, } for _, tt := range tests { @@ -100,19 +100,19 @@ func TestStore_RegularTokens(t *testing.T) { require.True(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) } - if tt.set.master != "" { - require.True(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + if tt.set.recovery != "" { + require.True(t, s.UpdateAgentRecoveryToken(tt.set.recovery, tt.set.recoverySource)) } // If they don't change then they return false. 
require.False(t, s.UpdateUserToken(tt.set.user, tt.set.userSource)) require.False(t, s.UpdateAgentToken(tt.set.agent, tt.set.agentSource)) require.False(t, s.UpdateReplicationToken(tt.set.repl, tt.set.replSource)) - require.False(t, s.UpdateAgentMasterToken(tt.set.master, tt.set.masterSource)) + require.False(t, s.UpdateAgentRecoveryToken(tt.set.recovery, tt.set.recoverySource)) require.Equal(t, tt.effective.user, s.UserToken()) require.Equal(t, tt.effective.agent, s.AgentToken()) - require.Equal(t, tt.effective.master, s.AgentMasterToken()) + require.Equal(t, tt.effective.recovery, s.AgentRecoveryToken()) require.Equal(t, tt.effective.repl, s.ReplicationToken()) tok, src := s.UserTokenAndSource() @@ -123,9 +123,9 @@ func TestStore_RegularTokens(t *testing.T) { require.Equal(t, tt.raw.agent, tok) require.Equal(t, tt.raw.agentSource, src) - tok, src = s.AgentMasterTokenAndSource() - require.Equal(t, tt.raw.master, tok) - require.Equal(t, tt.raw.masterSource, src) + tok, src = s.AgentRecoveryTokenAndSource() + require.Equal(t, tt.raw.recovery, tok) + require.Equal(t, tt.raw.recoverySource, src) tok, src = s.ReplicationTokenAndSource() require.Equal(t, tt.raw.repl, tok) @@ -134,27 +134,27 @@ func TestStore_RegularTokens(t *testing.T) { } } -func TestStore_AgentMasterToken(t *testing.T) { +func TestStore_AgentRecoveryToken(t *testing.T) { s := new(Store) verify := func(want bool, toks ...string) { for _, tok := range toks { - require.Equal(t, want, s.IsAgentMasterToken(tok)) + require.Equal(t, want, s.IsAgentRecoveryToken(tok)) } } verify(false, "", "nope") - s.UpdateAgentMasterToken("master", TokenSourceConfig) - verify(true, "master") + s.UpdateAgentRecoveryToken("recovery", TokenSourceConfig) + verify(true, "recovery") verify(false, "", "nope") - s.UpdateAgentMasterToken("another", TokenSourceConfig) + s.UpdateAgentRecoveryToken("another", TokenSourceConfig) verify(true, "another") - verify(false, "", "nope", "master") + verify(false, "", "nope", "recovery") - s.UpdateAgentMasterToken("", TokenSourceConfig) - verify(false, "", "nope", "master", "another") + s.UpdateAgentRecoveryToken("", TokenSourceConfig) + verify(false, "", "nope", "recovery", "another") } func TestStore_Notify(t *testing.T) { @@ -180,7 +180,7 @@ func TestStore_Notify(t *testing.T) { agentNotifier := newNotification(t, s, TokenKindAgent) userNotifier := newNotification(t, s, TokenKindUser) - agentMasterNotifier := newNotification(t, s, TokenKindAgentMaster) + agentRecoveryNotifier := newNotification(t, s, TokenKindAgentRecovery) replicationNotifier := newNotification(t, s, TokenKindReplication) replicationNotifier2 := newNotification(t, s, TokenKindReplication) @@ -193,7 +193,7 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotifiedOnce(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // now update the agent token which should send notificaitons to the agent and all notifier @@ -202,16 +202,16 @@ func TestStore_Notify(t *testing.T) { requireNotifiedOnce(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) - // now update the agent master token which should send notificaitons to the agent master and all notifier - require.True(t, 
s.UpdateAgentMasterToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) + // now update the agent recovery token which should send notificaitons to the agent recovery and all notifier + require.True(t, s.UpdateAgentRecoveryToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotifiedOnce(t, agentMasterNotifier.Ch) + requireNotifiedOnce(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // now update the replication token which should send notificaitons to the replication and all notifier @@ -220,7 +220,7 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotifiedOnce(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotifiedOnce(t, replicationNotifier2.Ch) s.StopNotify(replicationNotifier2) @@ -231,12 +231,12 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotifiedOnce(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) requireNotNotified(t, replicationNotifier2.Ch) // request updates but that are not changes require.False(t, s.UpdateAgentToken("5d748ec2-d536-461f-8e2a-1f7eae98d559", TokenSourceAPI)) - require.False(t, s.UpdateAgentMasterToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) + require.False(t, s.UpdateAgentRecoveryToken("789badc8-f850-43e1-8742-9b9f484957cc", TokenSourceAPI)) require.False(t, s.UpdateUserToken("47788919-f944-476a-bda5-446d64be1df8", TokenSourceAPI)) require.False(t, s.UpdateReplicationToken("eb0b56b9-fa65-4ae1-902a-c64457c62ac6", TokenSourceAPI)) @@ -244,5 +244,5 @@ func TestStore_Notify(t *testing.T) { requireNotNotified(t, agentNotifier.Ch) requireNotNotified(t, userNotifier.Ch) requireNotNotified(t, replicationNotifier.Ch) - requireNotNotified(t, agentMasterNotifier.Ch) + requireNotNotified(t, agentRecoveryNotifier.Ch) } From 8bc11b08dc3ff4dc34be7d9ceeafb09580e926e2 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 7 Dec 2021 12:39:28 +0000 Subject: [PATCH 50/60] Rename `ACLMasterToken` => `ACLInitialManagementToken` (#11746) --- agent/acl_endpoint_test.go | 2 +- agent/agent.go | 4 +-- agent/config/builder.go | 4 +-- agent/config/deprecated_test.go | 6 ++-- agent/config/runtime.go | 6 ++-- agent/config/runtime_test.go | 2 +- .../TestRuntimeConfig_Sanitize.golden | 2 +- agent/consul/acl_endpoint_test.go | 2 +- agent/consul/acl_replication_test.go | 8 ++--- agent/consul/acl_token_exp_test.go | 2 +- agent/consul/catalog_endpoint_test.go | 18 +++++------ agent/consul/config.go | 6 ++-- agent/consul/config_endpoint_test.go | 12 ++++---- agent/consul/connect_ca_endpoint_test.go | 4 +-- agent/consul/coordinate_endpoint_test.go | 6 ++-- agent/consul/discovery_chain_endpoint_test.go | 2 +- .../consul/federation_state_endpoint_test.go | 10 +++---- agent/consul/health_endpoint_test.go | 4 +-- agent/consul/intention_endpoint_test.go | 18 +++++------ agent/consul/internal_endpoint_test.go | 12 ++++---- agent/consul/kvs_endpoint_test.go | 10 +++---- agent/consul/leader.go | 26 ++++++++-------- agent/consul/leader_connect_test.go | 2 +- .../consul/leader_federation_state_ae_test.go | 4 +-- agent/consul/leader_intentions_test.go | 2 +- agent/consul/leader_test.go | 30 
+++++++++---------- .../operator_autopilot_endpoint_test.go | 4 +-- agent/consul/operator_raft_endpoint_test.go | 6 ++-- agent/consul/prepared_query_endpoint_test.go | 16 +++++----- agent/consul/rpc_test.go | 4 +-- agent/consul/server_test.go | 2 +- agent/consul/session_endpoint_test.go | 6 ++-- agent/consul/snapshot_endpoint_test.go | 2 +- agent/consul/txn_endpoint_test.go | 4 +-- 34 files changed, 124 insertions(+), 124 deletions(-) diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 1f49df0e3..8c82152d1 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -881,7 +881,7 @@ func TestACL_HTTP(t *testing.T) { require.True(t, ok) require.Len(t, tokens, 1) token := tokens[0] - require.Equal(t, "Master Token", token.Description) + require.Equal(t, "Initial Management Token", token.Description) require.Len(t, token.Policies, 1) require.Equal(t, structs.ACLPolicyGlobalManagementID, token.Policies[0].ID) }) diff --git a/agent/agent.go b/agent/agent.go index ecf84fdea..d4f0397bb 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1153,8 +1153,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co if runtimeCfg.RaftTrailingLogs != 0 { cfg.RaftConfig.TrailingLogs = uint64(runtimeCfg.RaftTrailingLogs) } - if runtimeCfg.ACLMasterToken != "" { - cfg.ACLMasterToken = runtimeCfg.ACLMasterToken + if runtimeCfg.ACLInitialManagementToken != "" { + cfg.ACLInitialManagementToken = runtimeCfg.ACLInitialManagementToken } cfg.ACLTokenReplication = runtimeCfg.ACLTokenReplication cfg.ACLsEnabled = runtimeCfg.ACLsEnabled diff --git a/agent/config/builder.go b/agent/config/builder.go index 1cc3192db..a022f8846 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -860,8 +860,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) { ACLDefaultPolicy: stringVal(c.ACL.DefaultPolicy), }, - ACLEnableKeyListPolicy: boolVal(c.ACL.EnableKeyListPolicy), - ACLMasterToken: stringVal(c.ACL.Tokens.InitialManagement), + ACLEnableKeyListPolicy: boolVal(c.ACL.EnableKeyListPolicy), + ACLInitialManagementToken: stringVal(c.ACL.Tokens.InitialManagement), ACLTokenReplication: boolVal(c.ACL.TokenReplication), diff --git a/agent/config/deprecated_test.go b/agent/config/deprecated_test.go index 3ded0ff81..6cbec5448 100644 --- a/agent/config/deprecated_test.go +++ b/agent/config/deprecated_test.go @@ -110,7 +110,7 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { require.ElementsMatch(expectWarns, result.Warnings) rt := result.RuntimeConfig - require.Equal("token1", rt.ACLMasterToken) + require.Equal("token1", rt.ACLInitialManagementToken) require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) }) @@ -141,7 +141,7 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { require.ElementsMatch(expectWarns, result.Warnings) rt := result.RuntimeConfig - require.Equal("token1", rt.ACLMasterToken) + require.Equal("token1", rt.ACLInitialManagementToken) require.Equal("token2", rt.ACLTokens.ACLAgentRecoveryToken) }) @@ -169,7 +169,7 @@ func TestLoad_DeprecatedConfig_ACLMasterTokens(t *testing.T) { require.NoError(err) rt := result.RuntimeConfig - require.Equal("token3", rt.ACLMasterToken) + require.Equal("token3", rt.ACLInitialManagementToken) require.Equal("token4", rt.ACLTokens.ACLAgentRecoveryToken) }) } diff --git a/agent/config/runtime.go b/agent/config/runtime.go index e2393363f..1d13e19d8 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -73,12 +73,12 @@ type RuntimeConfig struct { // hcl: 
acl.enable_key_list_policy = (true|false) ACLEnableKeyListPolicy bool - // ACLMasterToken is used to bootstrap the ACL system. It should be specified + // ACLInitialManagementToken is used to bootstrap the ACL system. It should be specified // on the servers in the PrimaryDatacenter. When the leader comes online, it ensures - // that the Master token is available. This provides the initial token. + // that the initial management token is available. This provides the initial token. // // hcl: acl.tokens.initial_management = string - ACLMasterToken string + ACLInitialManagementToken string // ACLtokenReplication is used to indicate that both tokens and policies // should be replicated instead of just policies diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 82192632a..63b216268 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5363,7 +5363,7 @@ func TestLoad_FullConfig(t *testing.T) { ACLRoleTTL: 9876 * time.Second, }, ACLEnableKeyListPolicy: true, - ACLMasterToken: "3820e09a", + ACLInitialManagementToken: "3820e09a", ACLTokenReplication: true, AdvertiseAddrLAN: ipAddr("17.99.29.16"), AdvertiseAddrWAN: ipAddr("78.63.37.19"), diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 083e7d853..951511bcf 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -1,6 +1,6 @@ { "ACLEnableKeyListPolicy": false, - "ACLMasterToken": "hidden", + "ACLInitialManagementToken": "hidden", "ACLResolverSettings": { "ACLDefaultPolicy": "", "ACLDownPolicy": "", diff --git a/agent/consul/acl_endpoint_test.go b/agent/consul/acl_endpoint_test.go index 1dac564ab..ddf00ba11 100644 --- a/agent/consul/acl_endpoint_test.go +++ b/agent/consul/acl_endpoint_test.go @@ -32,7 +32,7 @@ func TestACLEndpoint_BootstrapTokens(t *testing.T) { t.Parallel() dir, srv, codec := testACLServerWithConfig(t, func(c *Config) { // remove this as we are bootstrapping - c.ACLMasterToken = "" + c.ACLInitialManagementToken = "" }, false) waitForLeaderEstablishment(t, srv) diff --git a/agent/consul/acl_replication_test.go b/agent/consul/acl_replication_test.go index 8bc1e8c24..14494292c 100644 --- a/agent/consul/acl_replication_test.go +++ b/agent/consul/acl_replication_test.go @@ -301,7 +301,7 @@ func TestACLReplication_Tokens(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -513,7 +513,7 @@ func TestACLReplication_Policies(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -633,7 +633,7 @@ func TestACLReplication_TokensRedacted(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -783,7 +783,7 @@ func TestACLReplication_AllTypes(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git 
a/agent/consul/acl_token_exp_test.go b/agent/consul/acl_token_exp_test.go index 6bb3f6ce9..17e8622c1 100644 --- a/agent/consul/acl_token_exp_test.go +++ b/agent/consul/acl_token_exp_test.go @@ -44,7 +44,7 @@ func testACLTokenReap_Primary(t *testing.T, local, global bool) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLTokenMinExpirationTTL = 10 * time.Millisecond c.ACLTokenMaxExpirationTTL = 8 * time.Second }) diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index a1d34bf99..8b2e9101e 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -184,7 +184,7 @@ func TestCatalog_Register_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -452,7 +452,7 @@ func TestCatalog_Register_ConnectProxy_ACLDestinationServiceName(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -570,7 +570,7 @@ func TestCatalog_Deregister_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1297,7 +1297,7 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2409,7 +2409,7 @@ func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2699,7 +2699,7 @@ func testACLFilterServer(t *testing.T) (dir, token string, srv *Server, codec rp dir, srv = testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) @@ -2855,7 +2855,7 @@ func TestCatalog_NodeServices_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -3258,7 +3258,7 @@ func TestCatalog_GatewayServices_ACLFiltering(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -3970,7 +3970,7 @@ func TestCatalog_VirtualIPForService_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - 
c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.Build = "1.11.0" }) diff --git a/agent/consul/config.go b/agent/consul/config.go index 9c343494a..4b017da6b 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -180,10 +180,10 @@ type Config struct { // ACLEnabled is used to enable ACLs ACLsEnabled bool - // ACLMasterToken is used to bootstrap the ACL system. It should be specified + // ACLInitialManagementToken is used to bootstrap the ACL system. It should be specified // on the servers in the PrimaryDatacenter. When the leader comes online, it ensures - // that the Master token is available. This provides the initial token. - ACLMasterToken string + // that the initial management token is available. This provides the initial token. + ACLInitialManagementToken string // ACLTokenReplication is used to enabled token replication. // diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index f247cf4b9..1187120ac 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -155,7 +155,7 @@ func TestConfigEntry_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -271,7 +271,7 @@ func TestConfigEntry_Get_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -471,7 +471,7 @@ func TestConfigEntry_List_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -545,7 +545,7 @@ func TestConfigEntry_ListAll_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -750,7 +750,7 @@ func TestConfigEntry_Delete_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1959,7 +1959,7 @@ func TestConfigEntry_ResolveServiceConfig_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index c257c9fc6..d5e52f9e3 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -163,7 +163,7 @@ func TestConnectCAConfig_GetSet_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken 
c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1108,7 +1108,7 @@ func TestConnectCASignValidation(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/coordinate_endpoint_test.go b/agent/consul/coordinate_endpoint_test.go index df9ac8330..c75e05d73 100644 --- a/agent/consul/coordinate_endpoint_test.go +++ b/agent/consul/coordinate_endpoint_test.go @@ -191,7 +191,7 @@ func TestCoordinate_Update_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -349,7 +349,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -524,7 +524,7 @@ func TestCoordinate_Node_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 275aff03a..d1ecb2cbe 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -26,7 +26,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/federation_state_endpoint_test.go b/agent/consul/federation_state_endpoint_test.go index 28cad49cd..a8544869c 100644 --- a/agent/consul/federation_state_endpoint_test.go +++ b/agent/consul/federation_state_endpoint_test.go @@ -116,7 +116,7 @@ func TestFederationState_Apply_Upsert_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -237,7 +237,7 @@ func TestFederationState_Get_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -409,7 +409,7 @@ func TestFederationState_List_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -425,7 +425,7 @@ func TestFederationState_List_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer 
os.RemoveAll(dir2) @@ -695,7 +695,7 @@ func TestFederationState_Apply_Delete_ACLDeny(t *testing.T) { c.DisableFederationStateAntiEntropy = true c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index fe3e3ef6d..0a6100052 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -983,7 +983,7 @@ func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1289,7 +1289,7 @@ func TestHealth_ServiceNodes_Ingress_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index ec658fddb..ec3941348 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -867,7 +867,7 @@ func TestIntentionApply_aclDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1257,7 +1257,7 @@ func TestIntentionApply_aclDelete(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1323,7 +1323,7 @@ func TestIntentionApply_aclUpdate(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1377,7 +1377,7 @@ func TestIntentionApply_aclManagement(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1422,7 +1422,7 @@ func TestIntentionApply_aclUpdateChange(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1472,7 +1472,7 @@ func TestIntentionGet_acl(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1879,7 +1879,7 @@ func TestIntentionCheck_defaultACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + 
c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1915,7 +1915,7 @@ func TestIntentionCheck_defaultACLAllow(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" }) defer os.RemoveAll(dir1) @@ -1951,7 +1951,7 @@ func TestIntentionCheck_aclDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 7d293ad0e..f9105304f 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -570,7 +570,7 @@ func TestInternal_EventFire_Token(t *testing.T) { dir, srv := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDownPolicy = "deny" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) @@ -768,7 +768,7 @@ func TestInternal_ServiceDump_ACL(t *testing.T) { dir, s := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir) @@ -1182,7 +1182,7 @@ func TestInternal_GatewayServiceDump_Terminating_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1528,7 +1528,7 @@ func TestInternal_GatewayServiceDump_Ingress_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2192,7 +2192,7 @@ func TestInternal_ServiceTopology_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2331,7 +2331,7 @@ func TestInternal_IntentionUpstreams_ACL(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/kvs_endpoint_test.go b/agent/consul/kvs_endpoint_test.go index 575d61b06..4723cfdb8 100644 --- a/agent/consul/kvs_endpoint_test.go +++ b/agent/consul/kvs_endpoint_test.go @@ -84,7 +84,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -189,7 +189,7 @@ func TestKVS_Get_ACLDeny(t 
*testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -413,7 +413,7 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -492,7 +492,7 @@ func TestKVSEndpoint_List_ACLEnableKeyListPolicy(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.ACLEnableKeyListPolicy = true }) @@ -684,7 +684,7 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index c66d3fa2c..2861c6cbe 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -431,28 +431,28 @@ func (s *Server) initializeACLs(ctx context.Context) error { s.logger.Info("Created ACL 'global-management' policy") } - // Check for configured master token. - if master := s.config.ACLMasterToken; len(master) > 0 { + // Check for configured initial management token. + if initialManagement := s.config.ACLInitialManagementToken; len(initialManagement) > 0 { state := s.fsm.State() - if _, err := uuid.ParseUUID(master); err != nil { - s.logger.Warn("Configuring a non-UUID master token is deprecated") + if _, err := uuid.ParseUUID(initialManagement); err != nil { + s.logger.Warn("Configuring a non-UUID initial management token is deprecated") } - _, token, err := state.ACLTokenGetBySecret(nil, master, nil) + _, token, err := state.ACLTokenGetBySecret(nil, initialManagement, nil) if err != nil { - return fmt.Errorf("failed to get master token: %v", err) + return fmt.Errorf("failed to get initial management token: %v", err) } // Ignoring expiration times to avoid an insertion collision. 
if token == nil { accessor, err := lib.GenerateUUID(s.checkTokenUUID) if err != nil { - return fmt.Errorf("failed to generate the accessor ID for the master token: %v", err) + return fmt.Errorf("failed to generate the accessor ID for the initial management token: %v", err) } token := structs.ACLToken{ AccessorID: accessor, - SecretID: master, - Description: "Master Token", + SecretID: initialManagement, + Description: "Initial Management Token", Policies: []structs.ACLTokenPolicyLink{ { ID: structs.ACLPolicyGlobalManagementID, @@ -472,12 +472,12 @@ func (s *Server) initializeACLs(ctx context.Context) error { ResetIndex: 0, } if _, err := s.raftApply(structs.ACLBootstrapRequestType, &req); err == nil { - s.logger.Info("Bootstrapped ACL master token from configuration") + s.logger.Info("Bootstrapped ACL initial management token from configuration") done = true } else { if err.Error() != structs.ACLBootstrapNotAllowedErr.Error() && err.Error() != structs.ACLBootstrapInvalidResetIndexErr.Error() { - return fmt.Errorf("failed to bootstrap master token: %v", err) + return fmt.Errorf("failed to bootstrap initial management token: %v", err) } } } @@ -489,10 +489,10 @@ func (s *Server) initializeACLs(ctx context.Context) error { CAS: false, } if _, err := s.raftApply(structs.ACLTokenSetRequestType, &req); err != nil { - return fmt.Errorf("failed to create master token: %v", err) + return fmt.Errorf("failed to create initial management token: %v", err) } - s.logger.Info("Created ACL master token from configuration") + s.logger.Info("Created ACL initial management token from configuration") } } } diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go index 0d7ccd268..abaa45b6b 100644 --- a/agent/consul/leader_connect_test.go +++ b/agent/consul/leader_connect_test.go @@ -204,7 +204,7 @@ func TestCAManager_Initialize_Secondary(t *testing.T) { c.PrimaryDatacenter = "primary" c.Build = "1.6.0" c.ACLsEnabled = true - c.ACLMasterToken = masterToken + c.ACLInitialManagementToken = masterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.CAConfig.Config["PrivateKeyType"] = tc.keyType c.CAConfig.Config["PrivateKeyBits"] = tc.keyBits diff --git a/agent/consul/leader_federation_state_ae_test.go b/agent/consul/leader_federation_state_ae_test.go index 402fe2241..d7f6d108f 100644 --- a/agent/consul/leader_federation_state_ae_test.go +++ b/agent/consul/leader_federation_state_ae_test.go @@ -359,7 +359,7 @@ func TestLeader_FederationStateAntiEntropyPruning_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -373,7 +373,7 @@ func TestLeader_FederationStateAntiEntropyPruning_ACLDeny(t *testing.T) { c.PrimaryDatacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) diff --git a/agent/consul/leader_intentions_test.go b/agent/consul/leader_intentions_test.go index 79f1d771e..363c2036c 100644 --- a/agent/consul/leader_intentions_test.go +++ b/agent/consul/leader_intentions_test.go @@ -29,7 +29,7 @@ func TestLeader_ReplicateIntentions(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.Build = 
"1.6.0" c.OverrideInitialSerfTags = func(tags map[string]string) { diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 3339964a0..dd748b370 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -31,7 +31,7 @@ func TestLeader_RegisterMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -106,7 +106,7 @@ func TestLeader_FailedMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -171,7 +171,7 @@ func TestLeader_LeftMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -221,7 +221,7 @@ func TestLeader_ReapMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -286,7 +286,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = true }) @@ -296,7 +296,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir2, s2 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -306,7 +306,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { dir3, s3 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -394,7 +394,7 @@ func TestLeader_ReapServer(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = true }) @@ -404,7 +404,7 @@ func TestLeader_ReapServer(t *testing.T) { dir2, s2 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -414,7 +414,7 @@ func TestLeader_ReapServer(t *testing.T) { dir3, s3 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "allow" c.Bootstrap = false }) @@ -473,7 +473,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + 
c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -526,7 +526,7 @@ func TestLeader_Reconcile(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -875,7 +875,7 @@ func TestLeader_ReapTombstones(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.TombstoneTTL = 50 * time.Millisecond c.TombstoneTTLGranularity = 10 * time.Millisecond @@ -1180,7 +1180,7 @@ func TestLeader_ACL_Initialization(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = tt.master + c.ACLInitialManagementToken = tt.master } dir1, s1 := testServerWithConfig(t, conf) defer os.RemoveAll(dir1) @@ -1225,7 +1225,7 @@ func TestLeader_ACLUpgrade_IsStickyEvenIfSerfTagsRegress(t *testing.T) { c.Datacenter = "dc1" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git a/agent/consul/operator_autopilot_endpoint_test.go b/agent/consul/operator_autopilot_endpoint_test.go index d1a9e96b6..501f0f15d 100644 --- a/agent/consul/operator_autopilot_endpoint_test.go +++ b/agent/consul/operator_autopilot_endpoint_test.go @@ -54,7 +54,7 @@ func TestOperator_Autopilot_GetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.AutopilotConfig.CleanupDeadServers = false }) @@ -138,7 +138,7 @@ func TestOperator_Autopilot_SetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.AutopilotConfig.CleanupDeadServers = false }) diff --git a/agent/consul/operator_raft_endpoint_test.go b/agent/consul/operator_raft_endpoint_test.go index 778dcbf0f..1b944b3fc 100644 --- a/agent/consul/operator_raft_endpoint_test.go +++ b/agent/consul/operator_raft_endpoint_test.go @@ -72,7 +72,7 @@ func TestOperator_RaftGetConfiguration_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -199,7 +199,7 @@ func TestOperator_RaftRemovePeerByAddress_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -305,7 +305,7 @@ func TestOperator_RaftRemovePeerByID_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" c.RaftConfig.ProtocolVersion = 3 
}) diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index 05485dad7..9d82fdfa2 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -200,7 +200,7 @@ func TestPreparedQuery_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -629,7 +629,7 @@ func TestPreparedQuery_ACLDeny_Catchall_Template(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -831,7 +831,7 @@ func TestPreparedQuery_Get(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1072,7 +1072,7 @@ func TestPreparedQuery_List(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1293,7 +1293,7 @@ func TestPreparedQuery_Explain(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -1417,7 +1417,7 @@ func TestPreparedQuery_Execute(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2699,7 +2699,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -2709,7 +2709,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) { c.Datacenter = "dc2" c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index 8afb225a4..bde9b4d9e 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -882,7 +882,7 @@ func TestRPC_LocalTokenStrippedOnForward(t *testing.T) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLResolverSettings.ACLDefaultPolicy = "deny" - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -1010,7 +1010,7 @@ func TestRPC_LocalTokenStrippedOnForward_GRPC(t *testing.T) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true c.ACLResolverSettings.ACLDefaultPolicy = "deny" - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.RPCConfig.EnableStreaming = true }) s1.tokens.UpdateAgentToken("root", tokenStore.TokenSourceConfig) diff --git 
a/agent/consul/server_test.go b/agent/consul/server_test.go index 2b562e934..1f8bc4b0e 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -74,7 +74,7 @@ func testServerACLConfig(cb func(*Config)) func(*Config) { return func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = TestDefaultMasterToken + c.ACLInitialManagementToken = TestDefaultMasterToken c.ACLResolverSettings.ACLDefaultPolicy = "deny" if cb != nil { diff --git a/agent/consul/session_endpoint_test.go b/agent/consul/session_endpoint_test.go index 58fe1d787..500bd56e3 100644 --- a/agent/consul/session_endpoint_test.go +++ b/agent/consul/session_endpoint_test.go @@ -157,7 +157,7 @@ func TestSession_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -382,7 +382,7 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -731,7 +731,7 @@ func TestSession_Renew_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/snapshot_endpoint_test.go b/agent/consul/snapshot_endpoint_test.go index 44f0dda43..be0298332 100644 --- a/agent/consul/snapshot_endpoint_test.go +++ b/agent/consul/snapshot_endpoint_test.go @@ -271,7 +271,7 @@ func TestSnapshot_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index 6c061889d..9619dc881 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -319,7 +319,7 @@ func TestTxn_Apply_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) @@ -838,7 +838,7 @@ func TestTxn_Read_ACLDeny(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.PrimaryDatacenter = "dc1" c.ACLsEnabled = true - c.ACLMasterToken = "root" + c.ACLInitialManagementToken = "root" c.ACLResolverSettings.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir1) From 41924683589afda8c3654d9e930764431fcbf294 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 7 Dec 2021 12:48:50 +0000 Subject: [PATCH 51/60] Remove references to "master" ACL tokens in tests (#11751) --- agent/acl_endpoint_test.go | 33 ++++--- agent/agent_endpoint_test.go | 20 ++++- agent/agent_test.go | 24 +++--- agent/dns_test.go | 17 ++-- agent/http_oss_test.go | 6 +- agent/http_test.go | 26 ++++-- agent/keyring_test.go | 15 +++- agent/local/state_test.go | 28 ++++-- agent/remote_exec_test.go | 86 ++++++++++++++----- 
agent/routine-leak-checker/leak_test.go | 4 +- agent/testagent.go | 61 +++++++------ .../create/authmethod_create_test.go | 8 +- .../delete/authmethod_delete_test.go | 2 +- .../authmethod/list/authmethod_list_test.go | 4 +- .../authmethod/read/authmethod_read_test.go | 4 +- .../update/authmethod_update_test.go | 12 +-- .../create/bindingrule_create_test.go | 4 +- .../delete/bindingrule_delete_test.go | 2 +- .../bindingrule/list/bindingrule_list_test.go | 2 +- .../bindingrule/read/bindingrule_read_test.go | 2 +- .../update/bindingrule_update_test.go | 4 +- .../acl/policy/create/policy_create_test.go | 4 +- .../acl/policy/delete/policy_delete_test.go | 2 +- command/acl/policy/list/policy_list_test.go | 4 +- command/acl/policy/read/policy_read_test.go | 4 +- .../acl/policy/update/policy_update_test.go | 4 +- command/acl/role/create/role_create_test.go | 4 +- command/acl/role/delete/role_delete_test.go | 2 +- command/acl/role/list/role_list_test.go | 4 +- command/acl/role/read/role_read_test.go | 4 +- command/acl/role/update/role_update_test.go | 6 +- command/acl/rules/translate_test.go | 2 +- command/acl/token/clone/token_clone_test.go | 4 +- command/acl/token/create/token_create_test.go | 4 +- command/acl/token/delete/token_delete_test.go | 2 +- command/acl/token/list/token_list_test.go | 4 +- command/acl/token/read/token_read_test.go | 4 +- command/acl/token/update/token_update_test.go | 4 +- command/login/login_test.go | 6 +- command/logout/logout_test.go | 4 +- 40 files changed, 272 insertions(+), 164 deletions(-) diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 8c82152d1..bd8929aab 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -91,9 +91,14 @@ func TestACL_Bootstrap(t *testing.T) { } t.Parallel() - a := NewTestAgent(t, TestACLConfig()+` - acl_master_token = "" - `) + a := NewTestAgent(t, ` + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + } + `) defer a.Shutdown() tests := []struct { @@ -1689,7 +1694,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { for name, tc := range cases { tc := tc t.Run(name, func(t *testing.T) { - method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultMasterToken, "dc1", func(method *structs.ACLAuthMethod) { + method, err := upsertTestCustomizedAuthMethod(a.RPC, TestDefaultInitialManagementToken, "dc1", func(method *structs.ACLAuthMethod) { method.Type = "jwt" method.Config = map[string]interface{}{ "JWTSupportedAlgs": []string{"ES256"}, @@ -1758,7 +1763,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { testutil.RequireErrorContains(t, err, "Permission denied") }) - _, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultMasterToken, "dc1", func(rule *structs.ACLBindingRule) { + _, err = upsertTestCustomizedBindingRule(a.RPC, TestDefaultInitialManagementToken, "dc1", func(rule *structs.ACLBindingRule) { rule.AuthMethod = method.Name rule.BindType = structs.BindingRuleBindTypeService rule.BindName = "test--${value.name}--${value.primary_org}" @@ -1798,7 +1803,7 @@ func TestACLEndpoint_LoginLogout_jwt(t *testing.T) { // verify the token was deleted req, _ = http.NewRequest("GET", "/v1/acl/token/"+token.AccessorID, nil) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp = httptest.NewRecorder() // make the request @@ -1819,7 +1824,7 @@ func TestACL_Authorize(t *testing.T) { a1 := NewTestAgent(t, TestACLConfigWithParams(nil)) defer a1.Shutdown() - 
testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a1.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) policyReq := structs.ACLPolicySetRequest{ Policy: structs.ACLPolicy{ @@ -1827,7 +1832,7 @@ func TestACL_Authorize(t *testing.T) { Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, }, Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var policy structs.ACLPolicy require.NoError(t, a1.RPC("ACL.PolicySet", &policyReq, &policy)) @@ -1841,15 +1846,15 @@ func TestACL_Authorize(t *testing.T) { }, }, Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var token structs.ACLToken require.NoError(t, a1.RPC("ACL.TokenSet", &tokenReq, &token)) // secondary also needs to setup a replication token to pull tokens and policies - secondaryParams := DefaulTestACLConfigParams() - secondaryParams.ReplicationToken = secondaryParams.MasterToken + secondaryParams := DefaultTestACLConfigParams() + secondaryParams.ReplicationToken = secondaryParams.InitialManagementToken secondaryParams.EnableTokenReplication = true a2 := NewTestAgent(t, `datacenter = "dc2" `+TestACLConfigWithParams(secondaryParams)) @@ -1859,7 +1864,7 @@ func TestACL_Authorize(t *testing.T) { _, err := a2.JoinWAN([]string{addr}) require.NoError(t, err) - testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a2.RPC, "dc2", testrpc.WithToken(TestDefaultInitialManagementToken)) // this actually ensures a few things. 
First the dcs got connect okay, secondly that the policy we // are about ready to use in our local token creation exists in the secondary DC testrpc.WaitForACLReplication(t, a2.RPC, "dc2", structs.ACLReplicateTokens, policy.CreateIndex, 1, 0) @@ -1874,7 +1879,7 @@ func TestACL_Authorize(t *testing.T) { Local: true, }, Datacenter: "dc2", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, } var localToken structs.ACLToken @@ -2004,7 +2009,7 @@ func TestACL_Authorize(t *testing.T) { for _, dc := range []string{"dc1", "dc2"} { t.Run(dc, func(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/internal/acl/authorize?dc="+dc, jsonBody(request)) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) recorder := httptest.NewRecorder() raw, err := a1.srv.ACLAuthorize(recorder, req) require.NoError(t, err) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index b699c9666..2b1087479 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1295,7 +1295,7 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { t.Run("root-token-health-by-id", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/id/foo1", nil) require.NoError(t, err) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() _, err = a.srv.AgentHealthServiceByID(resp, req) require.NotEqual(t, acl.ErrPermissionDenied, err) @@ -1304,7 +1304,7 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { t.Run("root-token-health-by-name", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/name/foo", nil) require.NoError(t, err) - req.Header.Add("X-Consul-Token", TestDefaultMasterToken) + req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() _, err = a.srv.AgentHealthServiceByName(resp, req) require.NotEqual(t, acl.ErrPermissionDenied, err) @@ -5327,9 +5327,15 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { t.Run(tt.path, func(t *testing.T) { url := fmt.Sprintf("/v1/agent/token/%s?token=root", tt.path) - a := NewTestAgent(t, TestACLConfig()+` + a := NewTestAgent(t, ` + primary_datacenter = "dc1" + acl { + enabled = true + default_policy = "deny" + tokens { + initial_management = "root" default = "" agent = "" agent_master = "" @@ -5369,9 +5375,15 @@ func TestAgent_Token(t *testing.T) { // The behavior of this handler when ACLs are disabled is vetted over // in TestACL_Disabled_Response since there's already good infra set // up over there to test this, and it calls the common function. 
- a := NewTestAgent(t, TestACLConfig()+` + a := NewTestAgent(t, ` + primary_datacenter = "dc1" + acl { + enabled = true + default_policy = "deny" + tokens { + initial_management = "root" default = "" agent = "" agent_master = "" diff --git a/agent/agent_test.go b/agent/agent_test.go index 7e6852797..cc3151a82 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -214,10 +214,14 @@ func TestAgent_TokenStore(t *testing.T) { t.Parallel() a := NewTestAgent(t, ` - acl_token = "user" - acl_agent_token = "agent" - acl_agent_master_token = "master"`, - ) + acl { + tokens { + default = "user" + agent = "agent" + agent_recovery = "recovery" + } + } + `) defer a.Shutdown() if got, want := a.tokens.UserToken(), "user"; got != want { @@ -226,7 +230,7 @@ func TestAgent_TokenStore(t *testing.T) { if got, want := a.tokens.AgentToken(), "agent"; got != want { t.Fatalf("got %q want %q", got, want) } - if got, want := a.tokens.IsAgentRecoveryToken("master"), true; got != want { + if got, want := a.tokens.IsAgentRecoveryToken("recovery"), true; got != want { t.Fatalf("got %v want %v", got, want) } } @@ -5037,7 +5041,7 @@ func TestAutoConfig_Integration(t *testing.T) { srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig}) defer srv.Shutdown() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) // sign a JWT token now := time.Now() @@ -5084,7 +5088,7 @@ func TestAutoConfig_Integration(t *testing.T) { // when this is successful we managed to get the gossip key and serf addresses to bind to // and then connect. Additionally we would have to have certificates or else the // verify_incoming config on the server would not let it work. - testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) // spot check that we now have an ACL token require.NotEmpty(t, client.tokens.AgentToken()) @@ -5098,7 +5102,7 @@ func TestAutoConfig_Integration(t *testing.T) { ca := connect.TestCA(t, nil) req := &structs.CARequest{ Datacenter: "dc1", - WriteRequest: structs.WriteRequest{Token: TestDefaultMasterToken}, + WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, Config: &structs.CAConfiguration{ Provider: "consul", Config: map[string]interface{}{ @@ -5170,7 +5174,7 @@ func TestAgent_AutoEncrypt(t *testing.T) { srv := StartTestAgent(t, TestAgent{Name: "test-server", HCL: hclConfig}) defer srv.Shutdown() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) client := StartTestAgent(t, TestAgent{Name: "test-client", HCL: TestACLConfigWithParams(nil) + ` bootstrap = false @@ -5193,7 +5197,7 @@ func TestAgent_AutoEncrypt(t *testing.T) { // when this is successful we managed to get a TLS certificate and are using it for // encrypted RPC connections. 
- testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, client.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken)) // now we need to validate that our certificate has the correct CN aeCert := client.tlsConfigurator.Cert() diff --git a/agent/dns_test.go b/agent/dns_test.go index 9d25150d2..6d4085833 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -6224,11 +6224,18 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) { for _, tt := range tests { t.Run("ACLToken == "+tt.token, func(t *testing.T) { a := NewTestAgent(t, ` - acl_token = "`+tt.token+`" - acl_master_token = "root" - acl_datacenter = "dc1" - acl_down_policy = "deny" - acl_default_policy = "deny" + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + down_policy = "deny" + + tokens { + initial_management = "root" + default = "`+tt.token+`" + } + } `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") diff --git a/agent/http_oss_test.go b/agent/http_oss_test.go index db7d89e15..4bb392f61 100644 --- a/agent/http_oss_test.go +++ b/agent/http_oss_test.go @@ -72,13 +72,13 @@ func TestHTTPAPI_MethodNotAllowed_OSS(t *testing.T) { enabled = true default_policy = "deny" tokens { - master = "sekrit" - agent = "sekrit" + initial_management = "sekrit" + agent = "sekrit" } } `) defer a.Shutdown() - // Use the master token here so the wait actually works. + // Use the initial management token here so the wait actually works. testrpc.WaitForTestAgent(t, a.RPC, "dc1", testrpc.WithToken("sekrit")) all := []string{"GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS"} diff --git a/agent/http_test.go b/agent/http_test.go index 2a68b5ab0..33e2e7867 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -994,13 +994,21 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { dc1 := "dc1" a := NewTestAgent(t, ` - acl_datacenter = "`+dc1+`" - acl_default_policy = "deny" - acl_master_token = "master" - acl_agent_token = "agent" - acl_agent_master_token = "towel" - enable_debug = false -`) + primary_datacenter = "`+dc1+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "agent" + agent_recovery = "towel" + } + } + + enable_debug = false + `) cases := []struct { code int @@ -1010,7 +1018,7 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { }{ { code: http.StatusOK, - token: "master", + token: "root", endpoint: "/debug/pprof/heap", nilResponse: false, }, @@ -1034,7 +1042,7 @@ func TestHTTPServer_PProfHandlers_ACLs(t *testing.T) { }, { code: http.StatusOK, - token: "master", + token: "root", endpoint: "/debug/pprof/heap", nilResponse: false, }, diff --git a/agent/keyring_test.go b/agent/keyring_test.go index a777973cf..3362a2c70 100644 --- a/agent/keyring_test.go +++ b/agent/keyring_test.go @@ -302,10 +302,17 @@ func TestAgentKeyring_ACL(t *testing.T) { dataDir := testutil.TempDir(t, "keyfile") writeKeyRings(t, key1, dataDir) - a := StartTestAgent(t, TestAgent{HCL: TestACLConfig() + ` - acl_datacenter = "dc1" - acl_master_token = "root" - acl_default_policy = "deny" + a := StartTestAgent(t, TestAgent{HCL: ` + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, DataDir: dataDir}) defer a.Shutdown() diff --git a/agent/local/state_test.go b/agent/local/state_test.go index d955e38a2..af8944309 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -796,9 +796,17 @@ func 
TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { t.Parallel() a := agent.NewTestAgent(t, ` - acl_datacenter = "dc1" - acl_master_token = "root" - acl_default_policy = "deny" `) + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } + `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") @@ -1241,9 +1249,17 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" a := &agent.TestAgent{HCL: ` - acl_datacenter = "` + dc + `" - acl_master_token = "root" - acl_default_policy = "deny" `} + primary_datacenter = "` + dc + `" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } + `} if err := a.Start(t); err != nil { t.Fatal(err) } diff --git a/agent/remote_exec_test.go b/agent/remote_exec_test.go index fad3a1a9d..dc6489fa5 100644 --- a/agent/remote_exec_test.go +++ b/agent/remote_exec_test.go @@ -117,10 +117,17 @@ func TestRemoteExecGetSpec_ACLToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + default = "root" + } + } `, "root", true, dc) } @@ -132,10 +139,17 @@ func TestRemoteExecGetSpec_ACLAgentToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_agent_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + } + } `, "root", true, dc) } @@ -147,9 +161,16 @@ func TestRemoteExecGetSpec_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecGetSpec(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, "root", false, dc) } @@ -207,10 +228,17 @@ func TestRemoteExecWrites_ACLToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + default = "root" + } + } `, "root", true, dc) } @@ -222,10 +250,17 @@ func TestRemoteExecWrites_ACLAgentToken(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_agent_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + } + } `, "root", true, dc) } @@ -237,9 +272,16 @@ func TestRemoteExecWrites_ACLDeny(t *testing.T) { t.Parallel() dc := "dc1" testRemoteExecWrites(t, ` - acl_datacenter = "`+dc+`" - acl_master_token = "root" - acl_default_policy = "deny" + primary_datacenter = "`+dc+`" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + } + } `, "root", false, dc) } diff --git a/agent/routine-leak-checker/leak_test.go b/agent/routine-leak-checker/leak_test.go index 7b8de34c7..fd64e9c05 100644 --- a/agent/routine-leak-checker/leak_test.go +++ 
b/agent/routine-leak-checker/leak_test.go @@ -55,7 +55,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { require.NoError(t, ioutil.WriteFile(keyPath, []byte(keyPEM), 0600)) require.NoError(t, ioutil.WriteFile(caPath, []byte(caPEM), 0600)) - aclParams := agent.DefaulTestACLConfigParams() + aclParams := agent.DefaultTestACLConfigParams() aclParams.PrimaryDatacenter = "primary" aclParams.EnableTokenReplication = true @@ -76,7 +76,7 @@ func setupPrimaryServer(t *testing.T) *agent.TestAgent { a := agent.NewTestAgent(t, config) t.Cleanup(func() { a.Shutdown() }) - testrpc.WaitForTestAgent(t, a.RPC, "primary", testrpc.WithToken(agent.TestDefaultMasterToken)) + testrpc.WaitForTestAgent(t, a.RPC, "primary", testrpc.WithToken(agent.TestDefaultInitialManagementToken)) return a } diff --git a/agent/testagent.go b/agent/testagent.go index 80a4e0cf0..0119a612e 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -461,55 +461,62 @@ func TestConfig(logger hclog.Logger, sources ...config.Source) *config.RuntimeCo // with ACLs. func TestACLConfig() string { return ` - acl_datacenter = "dc1" - acl_default_policy = "deny" - acl_master_token = "root" - acl_agent_token = "root" - acl_agent_master_token = "towel" + primary_datacenter = "dc1" + + acl { + enabled = true + default_policy = "deny" + + tokens { + initial_management = "root" + agent = "root" + agent_recovery = "towel" + } + } ` } const ( - TestDefaultMasterToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a" - TestDefaultAgentMasterToken = "bca580d4-db07-4074-b766-48acc9676955'" + TestDefaultInitialManagementToken = "d9f05e83-a7ae-47ce-839e-c0d53a68c00a" + TestDefaultAgentRecoveryToken = "bca580d4-db07-4074-b766-48acc9676955'" ) type TestACLConfigParams struct { PrimaryDatacenter string DefaultPolicy string - MasterToken string + InitialManagementToken string AgentToken string DefaultToken string - AgentMasterToken string + AgentRecoveryToken string ReplicationToken string EnableTokenReplication bool } -func DefaulTestACLConfigParams() *TestACLConfigParams { +func DefaultTestACLConfigParams() *TestACLConfigParams { return &TestACLConfigParams{ - PrimaryDatacenter: "dc1", - DefaultPolicy: "deny", - MasterToken: TestDefaultMasterToken, - AgentToken: TestDefaultMasterToken, - AgentMasterToken: TestDefaultAgentMasterToken, + PrimaryDatacenter: "dc1", + DefaultPolicy: "deny", + InitialManagementToken: TestDefaultInitialManagementToken, + AgentToken: TestDefaultInitialManagementToken, + AgentRecoveryToken: TestDefaultAgentRecoveryToken, } } func (p *TestACLConfigParams) HasConfiguredTokens() bool { - return p.MasterToken != "" || + return p.InitialManagementToken != "" || p.AgentToken != "" || p.DefaultToken != "" || - p.AgentMasterToken != "" || + p.AgentRecoveryToken != "" || p.ReplicationToken != "" } func TestACLConfigNew() string { return TestACLConfigWithParams(&TestACLConfigParams{ - PrimaryDatacenter: "dc1", - DefaultPolicy: "deny", - MasterToken: "root", - AgentToken: "root", - AgentMasterToken: "towel", + PrimaryDatacenter: "dc1", + DefaultPolicy: "deny", + InitialManagementToken: "root", + AgentToken: "root", + AgentRecoveryToken: "towel", }) } @@ -525,14 +532,14 @@ var aclConfigTpl = template.Must(template.New("ACL Config").Parse(` enable_token_replication = {{printf "%t" .EnableTokenReplication }} {{- if .HasConfiguredTokens}} tokens { - {{- if ne .MasterToken ""}} - master = "{{ .MasterToken }}" + {{- if ne .InitialManagementToken ""}} + initial_management = "{{ .InitialManagementToken }}" {{- end}} {{- if ne .AgentToken ""}} 
agent = "{{ .AgentToken }}" {{- end}} - {{- if ne .AgentMasterToken "" }} - agent_master = "{{ .AgentMasterToken }}" + {{- if ne .AgentRecoveryToken "" }} + agent_recovery = "{{ .AgentRecoveryToken }}" {{- end}} {{- if ne .DefaultToken "" }} default = "{{ .DefaultToken }}" @@ -550,7 +557,7 @@ func TestACLConfigWithParams(params *TestACLConfigParams) string { cfg := params if params == nil { - cfg = DefaulTestACLConfigParams() + cfg = DefaultTestACLConfigParams() } err := aclConfigTpl.Execute(&buf, &cfg) diff --git a/command/acl/authmethod/create/authmethod_create_test.go b/command/acl/authmethod/create/authmethod_create_test.go index 26158c22a..0e45e624d 100644 --- a/command/acl/authmethod/create/authmethod_create_test.go +++ b/command/acl/authmethod/create/authmethod_create_test.go @@ -43,7 +43,7 @@ func TestAuthMethodCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -197,7 +197,7 @@ func TestAuthMethodCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -368,7 +368,7 @@ func TestAuthMethodCreateCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -514,7 +514,7 @@ func TestAuthMethodCreateCommand_config(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/delete/authmethod_delete_test.go b/command/acl/authmethod/delete/authmethod_delete_test.go index b112ab174..61ee7b169 100644 --- a/command/acl/authmethod/delete/authmethod_delete_test.go +++ b/command/acl/authmethod/delete/authmethod_delete_test.go @@ -36,7 +36,7 @@ func TestAuthMethodDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/list/authmethod_list_test.go b/command/acl/authmethod/list/authmethod_list_test.go index 4925f1f11..edf963183 100644 --- a/command/acl/authmethod/list/authmethod_list_test.go +++ b/command/acl/authmethod/list/authmethod_list_test.go @@ -37,7 +37,7 @@ func TestAuthMethodListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -118,7 +118,7 @@ func TestAuthMethodListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/read/authmethod_read_test.go b/command/acl/authmethod/read/authmethod_read_test.go index 1cc0f7771..0b4f04c0e 100644 --- a/command/acl/authmethod/read/authmethod_read_test.go +++ b/command/acl/authmethod/read/authmethod_read_test.go @@ -37,7 +37,7 @@ func TestAuthMethodReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -127,7 +127,7 @@ func TestAuthMethodReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/authmethod/update/authmethod_update_test.go b/command/acl/authmethod/update/authmethod_update_test.go index 9b9125085..8ebde83cd 100644 --- a/command/acl/authmethod/update/authmethod_update_test.go +++ b/command/acl/authmethod/update/authmethod_update_test.go @@ -42,7 +42,7 @@ func TestAuthMethodUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -179,7 +179,7 @@ func TestAuthMethodUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = 
"root" + initial_management = "root" } }`) @@ -270,7 +270,7 @@ func TestAuthMethodUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -374,7 +374,7 @@ func TestAuthMethodUpdateCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -612,7 +612,7 @@ func TestAuthMethodUpdateCommand_k8s_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -802,7 +802,7 @@ func TestAuthMethodUpdateCommand_config(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/create/bindingrule_create_test.go b/command/acl/bindingrule/create/bindingrule_create_test.go index 6a48096e6..60744954b 100644 --- a/command/acl/bindingrule/create/bindingrule_create_test.go +++ b/command/acl/bindingrule/create/bindingrule_create_test.go @@ -36,7 +36,7 @@ func TestBindingRuleCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -187,7 +187,7 @@ func TestBindingRuleCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/delete/bindingrule_delete_test.go b/command/acl/bindingrule/delete/bindingrule_delete_test.go index d2ce8d6ee..1c05cd610 100644 --- a/command/acl/bindingrule/delete/bindingrule_delete_test.go +++ b/command/acl/bindingrule/delete/bindingrule_delete_test.go @@ -35,7 +35,7 @@ func TestBindingRuleDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/list/bindingrule_list_test.go b/command/acl/bindingrule/list/bindingrule_list_test.go index fedf2cda9..55d75cfa3 100644 --- a/command/acl/bindingrule/list/bindingrule_list_test.go +++ b/command/acl/bindingrule/list/bindingrule_list_test.go @@ -37,7 +37,7 @@ func TestBindingRuleListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/read/bindingrule_read_test.go b/command/acl/bindingrule/read/bindingrule_read_test.go index eeab59ff9..fcb55785d 100644 --- a/command/acl/bindingrule/read/bindingrule_read_test.go +++ b/command/acl/bindingrule/read/bindingrule_read_test.go @@ -36,7 +36,7 @@ func TestBindingRuleReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/bindingrule/update/bindingrule_update_test.go b/command/acl/bindingrule/update/bindingrule_update_test.go index 3be2f162b..1ec873630 100644 --- a/command/acl/bindingrule/update/bindingrule_update_test.go +++ b/command/acl/bindingrule/update/bindingrule_update_test.go @@ -37,7 +37,7 @@ func TestBindingRuleUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -478,7 +478,7 @@ func TestBindingRuleUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/create/policy_create_test.go b/command/acl/policy/create/policy_create_test.go index 50fa4d569..8632ca228 100644 --- a/command/acl/policy/create/policy_create_test.go +++ b/command/acl/policy/create/policy_create_test.go @@ -37,7 +37,7 @@ func TestPolicyCreateCommand(t *testing.T) { acl { enabled = true tokens { - master = 
"root" + initial_management = "root" } }`) @@ -78,7 +78,7 @@ func TestPolicyCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/delete/policy_delete_test.go b/command/acl/policy/delete/policy_delete_test.go index 1548bb7d1..3057d8000 100644 --- a/command/acl/policy/delete/policy_delete_test.go +++ b/command/acl/policy/delete/policy_delete_test.go @@ -33,7 +33,7 @@ func TestPolicyDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/list/policy_list_test.go b/command/acl/policy/list/policy_list_test.go index 4784a46cb..208c7a82e 100644 --- a/command/acl/policy/list/policy_list_test.go +++ b/command/acl/policy/list/policy_list_test.go @@ -34,7 +34,7 @@ func TestPolicyListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -89,7 +89,7 @@ func TestPolicyListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/read/policy_read_test.go b/command/acl/policy/read/policy_read_test.go index b36528719..34d35e177 100644 --- a/command/acl/policy/read/policy_read_test.go +++ b/command/acl/policy/read/policy_read_test.go @@ -34,7 +34,7 @@ func TestPolicyReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -98,7 +98,7 @@ func TestPolicyReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/policy/update/policy_update_test.go b/command/acl/policy/update/policy_update_test.go index a2f57e865..164eba699 100644 --- a/command/acl/policy/update/policy_update_test.go +++ b/command/acl/policy/update/policy_update_test.go @@ -37,7 +37,7 @@ func TestPolicyUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -88,7 +88,7 @@ func TestPolicyUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/create/role_create_test.go b/command/acl/role/create/role_create_test.go index fb9decc9e..b7a31add4 100644 --- a/command/acl/role/create/role_create_test.go +++ b/command/acl/role/create/role_create_test.go @@ -33,7 +33,7 @@ func TestRoleCreateCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -126,7 +126,7 @@ func TestRoleCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/delete/role_delete_test.go b/command/acl/role/delete/role_delete_test.go index 22e7182db..a1b941cf6 100644 --- a/command/acl/role/delete/role_delete_test.go +++ b/command/acl/role/delete/role_delete_test.go @@ -32,7 +32,7 @@ func TestRoleDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/list/role_list_test.go b/command/acl/role/list/role_list_test.go index 5de61a10b..60803c8da 100644 --- a/command/acl/role/list/role_list_test.go +++ b/command/acl/role/list/role_list_test.go @@ -35,7 +35,7 @@ func TestRoleListCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -93,7 +93,7 @@ func TestRoleListCommand_JSON(t 
*testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/read/role_read_test.go b/command/acl/role/read/role_read_test.go index 20946c60e..8751d4b44 100644 --- a/command/acl/role/read/role_read_test.go +++ b/command/acl/role/read/role_read_test.go @@ -35,7 +35,7 @@ func TestRoleReadCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -203,7 +203,7 @@ func TestRoleReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/role/update/role_update_test.go b/command/acl/role/update/role_update_test.go index e3b8ecfa5..ebc49945b 100644 --- a/command/acl/role/update/role_update_test.go +++ b/command/acl/role/update/role_update_test.go @@ -35,7 +35,7 @@ func TestRoleUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -209,7 +209,7 @@ func TestRoleUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -292,7 +292,7 @@ func TestRoleUpdateCommand_noMerge(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/rules/translate_test.go b/command/acl/rules/translate_test.go index 73fb34693..830cb2403 100644 --- a/command/acl/rules/translate_test.go +++ b/command/acl/rules/translate_test.go @@ -35,7 +35,7 @@ func TestRulesTranslateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/clone/token_clone_test.go b/command/acl/token/clone/token_clone_test.go index a198597f7..4d5c86b37 100644 --- a/command/acl/token/clone/token_clone_test.go +++ b/command/acl/token/clone/token_clone_test.go @@ -70,7 +70,7 @@ func TestTokenCloneCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -178,7 +178,7 @@ func TestTokenCloneCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/create/token_create_test.go b/command/acl/token/create/token_create_test.go index e35866cb4..f3174988f 100644 --- a/command/acl/token/create/token_create_test.go +++ b/command/acl/token/create/token_create_test.go @@ -33,7 +33,7 @@ func TestTokenCreateCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -131,7 +131,7 @@ func TestTokenCreateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/delete/token_delete_test.go b/command/acl/token/delete/token_delete_test.go index 4aca85830..36a1d521c 100644 --- a/command/acl/token/delete/token_delete_test.go +++ b/command/acl/token/delete/token_delete_test.go @@ -33,7 +33,7 @@ func TestTokenDeleteCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/list/token_list_test.go b/command/acl/token/list/token_list_test.go index 21ceffa1a..ba6d3949c 100644 --- a/command/acl/token/list/token_list_test.go +++ b/command/acl/token/list/token_list_test.go @@ -35,7 +35,7 @@ func TestTokenListCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -90,7 +90,7 @@ func 
TestTokenListCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/read/token_read_test.go b/command/acl/token/read/token_read_test.go index dd41a736e..c74c5eec9 100644 --- a/command/acl/token/read/token_read_test.go +++ b/command/acl/token/read/token_read_test.go @@ -35,7 +35,7 @@ func TestTokenReadCommand_Pretty(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -83,7 +83,7 @@ func TestTokenReadCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/acl/token/update/token_update_test.go b/command/acl/token/update/token_update_test.go index 0e3547e1e..924e6052c 100644 --- a/command/acl/token/update/token_update_test.go +++ b/command/acl/token/update/token_update_test.go @@ -34,7 +34,7 @@ func TestTokenUpdateCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -166,7 +166,7 @@ func TestTokenUpdateCommand_JSON(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/login/login_test.go b/command/login/login_test.go index dcc4aaffa..3d730548d 100644 --- a/command/login/login_test.go +++ b/command/login/login_test.go @@ -44,7 +44,7 @@ func TestLoginCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -241,7 +241,7 @@ func TestLoginCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -339,7 +339,7 @@ func TestLoginCommand_jwt(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) diff --git a/command/logout/logout_test.go b/command/logout/logout_test.go index f0810a7a2..82e82bc03 100644 --- a/command/logout/logout_test.go +++ b/command/logout/logout_test.go @@ -35,7 +35,7 @@ func TestLogoutCommand(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) @@ -165,7 +165,7 @@ func TestLogoutCommand_k8s(t *testing.T) { acl { enabled = true tokens { - master = "root" + initial_management = "root" } }`) From 6626f91ff1686cf7b0013eb99af4b3b1ee6a8366 Mon Sep 17 00:00:00 2001 From: Mathew Estafanous <56979977+Mathew-Estafanous@users.noreply.github.com> Date: Tue, 7 Dec 2021 09:44:03 -0500 Subject: [PATCH 52/60] Transition all endpoint tests in agent_endpoint_test.go to go through ServeHTTP (#11499) --- agent/agent_endpoint.go | 2 +- agent/agent_endpoint_test.go | 1587 ++++++++++++++++++---------------- 2 files changed, 819 insertions(+), 770 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 9646f7f28..c7ec72661 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1005,7 +1005,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt } notFoundReason := fmt.Sprintf("ServiceId %s not found", sid.String()) if returnTextPlain(req) { - return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "application/json"} + return notFoundReason, CodeWithPayloadError{StatusCode: http.StatusNotFound, Reason: notFoundReason, ContentType: "text/plain"} } return &api.AgentServiceChecksInfo{ AggregatedStatus: api.HealthCritical, diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 2b1087479..3d4a80beb 100644 --- 
a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -53,8 +53,8 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp := httptest.NewRecorder() - _, err := srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) tokenReq := &structs.ACLToken{ Description: "agent-read-token-for-test", @@ -63,10 +63,12 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := srv.ACLTokenCreate(resp, req) + srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + svcToken := &structs.ACLToken{} + dec := json.NewDecoder(resp.Body) + err := dec.Decode(svcToken) require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) return svcToken.SecretID } @@ -283,13 +285,21 @@ func TestAgent_Services_MeshGateway(t *testing.T) { a.State.AddService(srv1, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(httptest.NewRecorder(), req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + require.Len(t, val, 1) actual := val["mg-dc1-01"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindMeshGateway, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. + if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } @@ -319,13 +329,21 @@ func TestAgent_Services_TerminatingGateway(t *testing.T) { require.NoError(t, a.State.AddService(srv1, "")) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(httptest.NewRecorder(), req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + require.Len(t, val, 1) actual := val["tg-dc1-01"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindTerminatingGateway, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. 
+ if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } @@ -357,17 +375,21 @@ func TestAgent_Services_ACLFilter(t *testing.T) { } t.Run("no token", func(t *testing.T) { - require := require.New(t) + req, _ := http.NewRequest("GET", "/v1/agent/services", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) + if err != nil { + t.Fatalf("Err: %v", err) + } - req := httptest.NewRequest("GET", "/v1/agent/services", nil) - rsp := httptest.NewRecorder() - - obj, err := a.srv.AgentServices(rsp, req) - require.NoError(err) - - val := obj.(map[string]*api.AgentService) - require.Empty(val) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + if len(val) != 0 { + t.Fatalf("bad: %v", val) + } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("limited token", func(t *testing.T) { @@ -380,28 +402,30 @@ func TestAgent_Services_ACLFilter(t *testing.T) { `) req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/services?token=%s", token), nil) - rsp := httptest.NewRecorder() + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - obj, err := a.srv.AgentServices(rsp, req) - require.NoError(err) - - val := obj.(map[string]*api.AgentService) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } require.Len(val, 1) - require.NotEmpty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { - require := require.New(t) - - req := httptest.NewRequest("GET", "/v1/agent/services?token=root", nil) - rsp := httptest.NewRecorder() - - obj, err := a.srv.AgentServices(rsp, req) - require.NoError(err) - - val := obj.(map[string]*api.AgentService) - require.Len(val, 2) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + req, _ := http.NewRequest("GET", "/v1/agent/services?token=root", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[string]*api.AgentService + err := dec.Decode(&val) + if err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -552,8 +576,7 @@ func TestAgent_Service(t *testing.T) { // don't alter it and affect later test cases. 
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(updatedProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) }, wantWait: 100 * time.Millisecond, @@ -586,8 +609,7 @@ func TestAgent_Service(t *testing.T) { // Re-register with _same_ proxy config req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) }, wantWait: 200 * time.Millisecond, @@ -679,8 +701,7 @@ func TestAgent_Service(t *testing.T) { { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(sidecarProxy)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } @@ -698,14 +719,11 @@ func TestAgent_Service(t *testing.T) { go tt.updateFunc() } start := time.Now() - obj, err := a.srv.AgentService(resp, req) + a.srv.h.ServeHTTP(resp, req) elapsed := time.Since(start) if tt.wantErr != "" { - require.Error(err) - require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) - } else { - require.NoError(err) + require.Contains(strings.ToLower(resp.Body.String()), strings.ToLower(tt.wantErr)) } if tt.wantCode != 0 { require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String()) @@ -719,12 +737,13 @@ func TestAgent_Service(t *testing.T) { } if tt.wantResp != nil { - assert.Equal(tt.wantResp, obj) + dec := json.NewDecoder(resp.Body) + val := &api.AgentService{} + err := dec.Decode(&val) + require.NoError(err) + + assert.Equal(tt.wantResp, val) assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash")) - } else { - // Janky but Equal doesn't help here because nil != - // *api.AgentService((*api.AgentService)(nil)) - assert.Nil(obj) } }) } @@ -751,25 +770,29 @@ func TestAgent_Checks(t *testing.T) { a.State.AddCheck(chk1, "") req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) - obj, err := a.srv.AgentChecks(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[types.CheckID]*structs.HealthCheck + err := dec.Decode(&val) if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[types.CheckID]*structs.HealthCheck) + if len(val) != 1 { - t.Fatalf("bad checks: %v", obj) + t.Fatalf("bad checks: %v", val) } if val["mysql"].Status != api.HealthPassing { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Node != chk1.Node { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Interval != chk1.Interval { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } if val["mysql"].Timeout != chk1.Timeout { - t.Fatalf("bad check: %v", obj) + t.Fatalf("bad check: %v", val) } } @@ -800,9 +823,13 @@ func TestAgent_ChecksWithFilter(t *testing.T) { a.State.AddCheck(chk2, "") req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape("Name == `redis`"), nil) - obj, err := a.srv.AgentChecks(nil, req) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[types.CheckID]*structs.HealthCheck + err 
:= dec.Decode(&val) require.NoError(t, err) - val := obj.(map[types.CheckID]*structs.HealthCheck) + require.Len(t, val, 1) _, ok := val["redis"] require.True(t, ok) @@ -822,21 +849,29 @@ func TestAgent_HealthServiceByID(t *testing.T) { ID: "mysql", Service: "mysql", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + + serviceReq := AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql2", Service: "mysql2", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql3", Service: "mysql3", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } @@ -918,41 +953,28 @@ func TestAgent_HealthServiceByID(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url+"?format=text", nil) resp := httptest.NewRecorder() - data, err := a.srv.AgentHealthServiceByID(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { - t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) - } - body, ok := data.(string) - if !ok { - t.Fatalf("Cannot get result as string in := %#v", data) + a.srv.h.ServeHTTP(resp, req) + body := resp.Body.String() + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: expected %d, but had: %d", expectedCode, resp.Code) } if got, want := body, expected; got != want { t.Fatalf("got body %q want %q", got, want) } - if got, want := codeWithPayload.Reason, expected; got != want { - t.Fatalf("got body %q want %q", got, want) - } }) t.Run("format=json", func(t *testing.T) { req, _ := http.NewRequest("GET", url, nil) resp := httptest.NewRecorder() - dataRaw, err := a.srv.AgentHealthServiceByID(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) + a.srv.h.ServeHTTP(resp, req) + if got, want := resp.Code, expectedCode; got != want { + t.Fatalf("returned bad status: expected %d, but had: %d", expectedCode, resp.Code) } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { - t.Fatalf("returned bad status: expected %d, but had: %d in %#v", expectedCode, codeWithPayload.StatusCode, codeWithPayload) + dec := json.NewDecoder(resp.Body) + data := &api.AgentServiceChecksInfo{} + if err := dec.Decode(data); err != nil { + t.Fatalf("Cannot convert result from JSON: %v", err) } - data, ok := dataRaw.(*api.AgentServiceChecksInfo) - if !ok { - t.Fatalf("Cannot connvert result to JSON: %#v", dataRaw) - } - if codeWithPayload.StatusCode != http.StatusNotFound { + if resp.Code != http.StatusNotFound { if data != nil && data.AggregatedStatus != expected { t.Fatalf("got body %v want %v", data, expected) } @@ -1020,42 +1042,49 @@ func TestAgent_HealthServiceByName(t *testing.T) { ID: "mysql1", Service: "mysql-pool-r", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + serviceReq := 
AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql2", Service: "mysql-pool-r", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql3", Service: "mysql-pool-rw", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "mysql4", Service: "mysql-pool-rw", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "httpd1", Service: "httpd", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "httpd2", Service: "httpd", } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { + if err := a.AddService(serviceReq); err != nil { t.Fatalf("err: %v", err) } @@ -1169,18 +1198,11 @@ func TestAgent_HealthServiceByName(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url+"?format=text", nil) resp := httptest.NewRecorder() - data, err := a.srv.AgentHealthServiceByName(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { + a.srv.h.ServeHTTP(resp, req) + if got, want := resp.Code, expectedCode; got != want { t.Fatalf("returned bad status: %d. Body: %q", resp.Code, resp.Body.String()) } - if got, want := codeWithPayload.Reason, expected; got != want { - t.Fatalf("got reason %q want %q", got, want) - } - if got, want := data, expected; got != want { + if got, want := resp.Body.String(), expected; got != want { t.Fatalf("got body %q want %q", got, want) } }) @@ -1188,21 +1210,26 @@ func TestAgent_HealthServiceByName(t *testing.T) { t.Helper() req, _ := http.NewRequest("GET", url, nil) resp := httptest.NewRecorder() - dataRaw, err := a.srv.AgentHealthServiceByName(resp, req) - codeWithPayload, ok := err.(CodeWithPayloadError) - if !ok { - t.Fatalf("Err: %v", err) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + data := make([]*api.AgentServiceChecksInfo, 0) + if err := dec.Decode(&data); err != nil { + t.Fatalf("Cannot convert result from JSON: %v", err) } - data, ok := dataRaw.([]api.AgentServiceChecksInfo) - if !ok { - t.Fatalf("Cannot connvert result to JSON") - } - if got, want := codeWithPayload.StatusCode, expectedCode; got != want { + if got, want := resp.Code, expectedCode; got != want { t.Fatalf("returned bad code: %d. 
Body: %#v", resp.Code, data) } if resp.Code != http.StatusNotFound { - if codeWithPayload.Reason != expected { - t.Fatalf("got wrong status %#v want %#v", codeWithPayload, expected) + matched := false + for _, d := range data { + if d.AggregatedStatus == expected { + matched = true + break + } + } + + if !matched { + t.Fatalf("got wrong status, wanted %#v", expected) } } }) @@ -1267,29 +1294,36 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { ID: "mysql1", Service: "mysql", } - require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal)) + serviceReq := AddServiceRequest{ + Service: service, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, + } + require.NoError(t, a.AddService(serviceReq)) - service = &structs.NodeService{ + serviceReq.Service = &structs.NodeService{ ID: "foo1", Service: "foo", } - require.NoError(t, a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal)) + require.NoError(t, a.AddService(serviceReq)) // no token t.Run("no-token-health-by-id", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/id/mysql1", nil) require.NoError(t, err) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByID(resp, req) - require.Equal(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("no-token-health-by-name", func(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/health/service/name/mysql", nil) require.NoError(t, err) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByName(resp, req) - require.Equal(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root-token-health-by-id", func(t *testing.T) { @@ -1297,8 +1331,8 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { require.NoError(t, err) req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByID(resp, req) - require.NotEqual(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("root-token-health-by-name", func(t *testing.T) { @@ -1306,8 +1340,8 @@ func TestAgent_HealthServicesACLEnforcement(t *testing.T) { require.NoError(t, err) req.Header.Add("X-Consul-Token", TestDefaultInitialManagementToken) resp := httptest.NewRecorder() - _, err = a.srv.AgentHealthServiceByName(resp, req) - require.NotEqual(t, acl.ErrPermissionDenied, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1341,17 +1375,18 @@ func TestAgent_Checks_ACLFilter(t *testing.T) { } t.Run("no token", func(t *testing.T) { - require := require.New(t) + req, _ := http.NewRequest("GET", "/v1/agent/checks", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - req := httptest.NewRequest("GET", "/v1/agent/checks", nil) - rsp := httptest.NewRecorder() + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } - obj, err := a.srv.AgentChecks(rsp, req) - require.NoError(err) - - val := obj.(map[types.CheckID]*structs.HealthCheck) - require.Empty(val) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("limited token", func(t *testing.T) { @@ -1367,28 
+1402,30 @@ func TestAgent_Checks_ACLFilter(t *testing.T) { `, a.Config.NodeName)) req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/checks?token=%s", token), nil) - rsp := httptest.NewRecorder() + resp := httptest.NewRecorder() - obj, err := a.srv.AgentChecks(rsp, req) - require.NoError(err) - - val := obj.(map[types.CheckID]*structs.HealthCheck) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + var val map[types.CheckID]*structs.HealthCheck + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } require.Len(val, 1) - require.NotEmpty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { - require := require.New(t) + req, _ := http.NewRequest("GET", "/v1/agent/checks?token=root", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - req := httptest.NewRequest("GET", "/v1/agent/checks?token=root", nil) - rsp := httptest.NewRecorder() - - obj, err := a.srv.AgentChecks(rsp, req) - require.NoError(err) - - val := obj.(map[types.CheckID]*structs.HealthCheck) - require.Len(val, 2) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + dec := json.NewDecoder(resp.Body) + val := make(map[types.CheckID]*structs.HealthCheck) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -1432,12 +1469,15 @@ func TestAgent_Self(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/self", nil) - obj, err := a.srv.AgentSelf(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := &Self{} + require.NoError(t, dec.Decode(val)) - val := obj.(Self) require.Equal(t, a.Config.SerfPortLAN, int(val.Member.Port)) - require.Equal(t, a.Config.SerfPortLAN, val.DebugConfig["SerfPortLAN"].(int)) + require.Equal(t, a.Config.SerfPortLAN, int(val.DebugConfig["SerfPortLAN"].(float64))) cs, err := a.GetLANCoordinate() require.NoError(t, err) @@ -1472,24 +1512,24 @@ func TestAgent_Self_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/self", nil) - if _, err := a.srv.AgentSelf(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/self?token=towel", nil) - if _, err := a.srv.AgentSelf(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/self?token=%s", ro), nil) - if _, err := a.srv.AgentSelf(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1505,24 +1545,24 @@ func TestAgent_Metrics_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/metrics", nil) - if _, err := 
a.srv.AgentMetrics(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/metrics?token=towel", nil) - if _, err := a.srv.AgentMetrics(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/metrics?token=%s", ro), nil) - if _, err := a.srv.AgentMetrics(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -1854,17 +1894,17 @@ func TestAgent_Reload_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/reload", nil) - if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/reload?token=%s", ro), nil) - if _, err := a.srv.AgentReload(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) // This proves we call the ACL function, and we've got the other reload @@ -1884,17 +1924,21 @@ func TestAgent_Members(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/members", nil) - obj, err := a.srv.AgentMembers(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) + if len(val) == 0 { - t.Fatalf("bad members: %v", obj) + t.Fatalf("bad members: %v", val) } if int(val[0].Port) != a.Config.SerfPortLAN { - t.Fatalf("not lan: %v", obj) + t.Fatalf("not lan: %v", val) } } @@ -1909,17 +1953,21 @@ func TestAgent_Members_WAN(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/members?wan=true", nil) - obj, err := a.srv.AgentMembers(nil, req) - if err != nil { + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { t.Fatalf("Err: %v", err) } - val := obj.([]serf.Member) + if len(val) == 0 { - t.Fatalf("bad members: %v", obj) + t.Fatalf("bad members: %v", val) } if int(val[0].Port) != a.Config.SerfPortWAN { - t.Fatalf("not wan: %v", obj) + t.Fatalf("not wan: %v", val) } } @@ -1941,21 +1989,22 @@ func TestAgent_Members_ACLFilter(t *testing.T) { testrpc.WaitForLeader(t, b.RPC, "dc1") joinPath := fmt.Sprintf("/v1/agent/join/127.0.0.1:%d?token=root", b.Config.SerfPortLAN) - _, err := a.srv.AgentJoin(nil, httptest.NewRequest(http.MethodPut, joinPath, nil)) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, 
httptest.NewRequest(http.MethodPut, joinPath, nil)) + require.Equal(t, http.StatusOK, resp.Code) t.Run("no token", func(t *testing.T) { - require := require.New(t) + req, _ := http.NewRequest("GET", "/v1/agent/members", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - req := httptest.NewRequest("GET", "/v1/agent/members", nil) - rsp := httptest.NewRecorder() - - obj, err := a.srv.AgentMembers(rsp, req) - require.NoError(err) - - val := obj.([]serf.Member) - require.Empty(val) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 0) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("limited token", func(t *testing.T) { @@ -1968,28 +2017,30 @@ func TestAgent_Members_ACLFilter(t *testing.T) { `, b.Config.NodeName)) req := httptest.NewRequest("GET", fmt.Sprintf("/v1/agent/members?token=%s", token), nil) - rsp := httptest.NewRecorder() + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - obj, err := a.srv.AgentMembers(rsp, req) - require.NoError(err) - - val := obj.([]serf.Member) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } require.Len(val, 1) - require.NotEmpty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + require.NotEmpty(resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) t.Run("root token", func(t *testing.T) { - require := require.New(t) + req, _ := http.NewRequest("GET", "/v1/agent/members?token=root", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) - req := httptest.NewRequest("GET", "/v1/agent/members?token=root", nil) - rsp := httptest.NewRecorder() - - obj, err := a.srv.AgentMembers(rsp, req) - require.NoError(err) - - val := obj.([]serf.Member) - require.Len(val, 2) - require.Empty(rsp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) + dec := json.NewDecoder(resp.Body) + val := make([]serf.Member, 0) + if err := dec.Decode(&val); err != nil { + t.Fatalf("Err: %v", err) + } + require.Len(t, val, 2) + require.Empty(t, resp.Header().Get("X-Consul-Results-Filtered-By-ACLs")) }) } @@ -2008,13 +2059,8 @@ func TestAgent_Join(t *testing.T) { addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortLAN) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) - obj, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) if len(a1.LANMembersInAgentPartition()) != 2 { t.Fatalf("should have 2 members") @@ -2042,13 +2088,8 @@ func TestAgent_Join_WAN(t *testing.T) { addr := fmt.Sprintf("127.0.0.1:%d", a2.Config.SerfPortWAN) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?wan=true", addr), nil) - obj, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) if len(a1.WANMembers()) != 2 { t.Fatalf("should have 2 members") @@ -2078,25 +2119,27 @@ func TestAgent_Join_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s", addr), nil) - if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - 
} + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=towel", addr), nil) - _, err := a1.srv.AgentJoin(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusOK, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a1.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/join/%s?token=%s", addr, ro), nil) - if _, err := a1.srv.AgentJoin(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) } @@ -2162,13 +2205,10 @@ func TestAgent_Leave(t *testing.T) { // Graceful leave now req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) - obj, err := a2.srv.AgentLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a2.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + retry.Run(t, func(r *retry.R) { m := a1.LANMembersInAgentPartition() if got, want := m[1].Status, serf.StatusLeft; got != want { @@ -2189,26 +2229,29 @@ func TestAgent_Leave_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/leave", nil) - if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/leave?token=%s", ro), nil) - if _, err := a.srv.AgentLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusForbidden, resp.Code) }) // this sub-test will change the state so that there is no leader. // it must therefore be the last one in this list. 
t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/leave?token=towel", nil) - if _, err := a.srv.AgentLeave(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2243,13 +2286,10 @@ func TestAgent_ForceLeave(t *testing.T) { // Force leave now req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s", a2.Config.NodeName), nil) - obj, err := a1.srv.AgentForceLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + retry.Run(t, func(r *retry.R) { m := a1.LANMembersInAgentPartition() if got, want := m[1].Status, serf.StatusLeft; got != want { @@ -2287,24 +2327,24 @@ func TestAgent_ForceLeave_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", uri, nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("agent master token", func(t *testing.T) { req, _ := http.NewRequest("PUT", uri+"?token=towel", nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("read-only token", func(t *testing.T) { ro := createACLTokenWithAgentReadPolicy(t, a.srv) req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", ro), nil) - if _, err := a.srv.AgentForceLeave(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("operator write token", func(t *testing.T) { @@ -2315,9 +2355,9 @@ func TestAgent_ForceLeave_ACLDeny(t *testing.T) { opToken := testCreateToken(t, a, rules) req, _ := http.NewRequest("PUT", fmt.Sprintf(uri+"?token=%s", opToken), nil) - if _, err := a.srv.AgentForceLeave(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2356,13 +2396,9 @@ func TestAgent_ForceLeavePrune(t *testing.T) { // Force leave now req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/agent/force-leave/%s?prune=true", a2.Config.NodeName), nil) - obj, err := a1.srv.AgentForceLeave(nil, req) - if err != nil { - t.Fatalf("Err: %v", err) - } - if obj != nil { - t.Fatalf("Err: %v", obj) - } + resp := httptest.NewRecorder() + a1.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) retry.Run(t, func(r *retry.R) { m := len(a1.LANMembersInAgentPartition()) if m != 1 { @@ -2454,13 +2490,9 @@ func TestAgent_RegisterCheck(t *testing.T) { TTL: 15 * time.Second, } req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) - obj, err := a.srv.AgentRegisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // Ensure we have a check mapping checkID := structs.NewCheckID("test", nil) @@ -2531,9 +2563,7 @@ func 
TestAgent_RegisterCheck_Scripts(t *testing.T) { t.Run(tt.name+" as node check", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(tt.check)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterCheck(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2548,9 +2578,7 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterService(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2565,9 +2593,7 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - if _, err := a.srv.AgentRegisterService(resp, req); err != nil { - t.Fatalf("err: %v", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != http.StatusOK { t.Fatalf("bad: %d", resp.Code) } @@ -2592,12 +2618,12 @@ func TestAgent_RegisterCheckScriptsExecDisable(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) res := httptest.NewRecorder() - _, err := a.srv.AgentRegisterCheck(res, req) - if err == nil { - t.Fatalf("expected error but got nil") + a.srv.h.ServeHTTP(res, req) + if http.StatusInternalServerError != res.Code { + t.Fatalf("expected 500 code error but got %v", res.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(res.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", res.Body.String()) } checkID := structs.NewCheckID("test", nil) require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") @@ -2622,12 +2648,12 @@ func TestAgent_RegisterCheckScriptsExecRemoteDisable(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args)) res := httptest.NewRecorder() - _, err := a.srv.AgentRegisterCheck(res, req) - if err == nil { - t.Fatalf("expected error but got nil") + a.srv.h.ServeHTTP(res, req) + if http.StatusInternalServerError != res.Code { + t.Fatalf("expected 500 code error but got %v", res.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(res.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", res.Body.String()) } checkID := structs.NewCheckID("test", nil) require.Nil(t, a.State.Check(checkID), "check registered with exec disabled") @@ -2649,12 +2675,10 @@ func TestAgent_RegisterCheck_Passing(t *testing.T) { Status: api.HealthPassing, } req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(args)) - obj, err := a.srv.AgentRegisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2691,8 +2715,9 @@ func TestAgent_RegisterCheck_BadStatus(t *testing.T) { req, _ := http.NewRequest("PUT", 
"/v1/agent/check/register", jsonReader(args)) resp := httptest.NewRecorder() a.srv.h.ServeHTTP(resp, req) - require.Equalf(t, http.StatusBadRequest, resp.Code, "resp: %v", resp.Body.String()) - require.Contains(t, resp.Body.String(), "Bad check status") + if resp.Code != http.StatusBadRequest { + t.Fatalf("accepted bad status") + } } func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { @@ -2725,8 +2750,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { // ensure the service is ready for registering a check for it. req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(svc)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a policy that has write on service foo policyReq := &structs.ACLPolicy{ @@ -2736,8 +2761,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp = httptest.NewRecorder() - _, err = a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a policy that has write on the node name of the agent policyReq = &structs.ACLPolicy{ @@ -2747,8 +2772,8 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/policy?token=root", jsonReader(policyReq)) resp = httptest.NewRecorder() - _, err = a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // create a token using the write-foo policy tokenReq := &structs.ACLToken{ @@ -2762,10 +2787,14 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + svcToken := &structs.ACLToken{} + if err := dec.Decode(svcToken); err != nil { + t.Fatalf("err: %v", err) + } require.NotNil(t, svcToken) // create a token using the write-node policy @@ -2780,57 +2809,67 @@ func TestAgent_RegisterCheck_ACLDeny(t *testing.T) { req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err = a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - nodeToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec = json.NewDecoder(resp.Body) + nodeToken := &structs.ACLToken{} + if err := dec.Decode(nodeToken); err != nil { + t.Fatalf("err: %v", err) + } require.NotNil(t, nodeToken) t.Run("no token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(nodeCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) }) t.Run("svc token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(nodeCheck)) - _, err := 
a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) }) t.Run("node token - node check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(nodeCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.NoError(r, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) }) t.Run("no token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) }) t.Run("node token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+nodeToken.SecretID, jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.True(r, acl.IsErrPermissionDenied(err)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) }) t.Run("svc token - svc check", func(t *testing.T) { retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token="+svcToken.SecretID, jsonReader(svcCheck)) - _, err := a.srv.AgentRegisterCheck(nil, req) - require.NoError(r, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) }) } @@ -2851,12 +2890,10 @@ func TestAgent_DeregisterCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil) - obj, err := a.srv.AgentDeregisterCheck(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2880,16 +2917,16 @@ func TestAgent_DeregisterCheckACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test", nil) - if _, err := a.srv.AgentDeregisterCheck(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/deregister/test?token=root", nil) - if _, err := a.srv.AgentDeregisterCheck(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2910,12 +2947,11 @@ func TestAgent_PassCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil) - obj, err := a.srv.AgentCheckPass(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -2943,16 +2979,16 @@ func TestAgent_PassCheck_ACLDeny(t *testing.T) { t.Run("no 
token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test", nil) - if _, err := a.srv.AgentCheckPass(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/pass/test?token=root", nil) - if _, err := a.srv.AgentCheckPass(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -2973,12 +3009,11 @@ func TestAgent_WarnCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil) - obj, err := a.srv.AgentCheckWarn(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 by got %v", resp.Code) } // Ensure we have a check mapping @@ -3006,16 +3041,16 @@ func TestAgent_WarnCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test", nil) - if _, err := a.srv.AgentCheckWarn(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/warn/test?token=root", nil) - if _, err := a.srv.AgentCheckWarn(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3036,12 +3071,11 @@ func TestAgent_FailCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil) - obj, err := a.srv.AgentCheckFail(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 by got %v", resp.Code) } // Ensure we have a check mapping @@ -3069,16 +3103,16 @@ func TestAgent_FailCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test", nil) - if _, err := a.srv.AgentCheckFail(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/fail/test?token=root", nil) - if _, err := a.srv.AgentCheckFail(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3109,14 +3143,8 @@ func TestAgent_UpdateCheck(t *testing.T) { t.Run(c.Status, func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(c)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code != 200 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { t.Fatalf("expected 200, got %d", resp.Code) } @@ -3134,14 +3162,8 @@ func 
TestAgent_UpdateCheck(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code != 200 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { t.Fatalf("expected 200, got %d", resp.Code) } @@ -3158,14 +3180,8 @@ func TestAgent_UpdateCheck(t *testing.T) { args := checkUpdate{Status: "itscomplicated"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentCheckUpdate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) - } - if resp.Code != 400 { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusBadRequest { t.Fatalf("expected 400, got %d", resp.Code) } }) @@ -3190,17 +3206,17 @@ func TestAgent_UpdateCheck_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { args := checkUpdate{api.HealthPassing, "hello-passing"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test", jsonReader(args)) - if _, err := a.srv.AgentCheckUpdate(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { args := checkUpdate{api.HealthPassing, "hello-passing"} req, _ := http.NewRequest("PUT", "/v1/agent/check/update/test?token=root", jsonReader(args)) - if _, err := a.srv.AgentCheckUpdate(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3248,13 +3264,10 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) { }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - obj, err := a.srv.AgentRegisterService(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure the service @@ -3337,8 +3350,9 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) { }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - _, err := a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) args = &structs.ServiceDefinition{ Name: "test", @@ -3361,8 +3375,9 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) { }, } req, _ = http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - _, err = a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp = httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) checks := a.State.Checks(structs.DefaultEnterpriseMetaInDefaultPartition()) require.Equal(t, 3, len(checks)) @@ -3417,8 +3432,9 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args)) - _, err := a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp := httptest.NewRecorder() + 
a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) args = &structs.ServiceDefinition{ Name: "test", @@ -3440,8 +3456,9 @@ func testAgent_RegisterService_ReRegister_ReplaceExistingChecks(t *testing.T, ex }, } req, _ = http.NewRequest("PUT", "/v1/agent/service/register?replace-existing-checks", jsonReader(args)) - _, err = a.srv.AgentRegisterService(nil, req) - require.NoError(t, err) + resp = httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) checks := a.State.Checks(structs.DefaultEnterpriseMetaInDefaultPartition()) require.Len(t, checks, 2) @@ -3569,9 +3586,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json)) rr := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(rr, req) - require.NoError(t, err) - require.Nil(t, obj) + a.srv.h.ServeHTTP(rr, req) require.Equal(t, 200, rr.Code, "body: %s", rr.Body) svc := &structs.NodeService{ @@ -3721,16 +3736,16 @@ func testAgent_RegisterService_ACLDeny(t *testing.T, extraHCL string) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -3765,10 +3780,7 @@ func testAgent_RegisterService_InvalidAddress(t *testing.T, extraHCL string) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - if err != nil { - t.Fatalf("got error %v want nil", err) - } + a.srv.h.ServeHTTP(resp, req) if got, want := resp.Code, 400; got != want { t.Fatalf("got code %d want %d", got, want) } @@ -3833,9 +3845,8 @@ func testAgent_RegisterService_UnmanagedConnectProxy(t *testing.T, extraHCL stri req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - require.NoError(t, err) - require.Nil(t, obj) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) // Ensure the service sid := structs.NewServiceID("connect-proxy", nil) @@ -3912,10 +3923,14 @@ func testCreateToken(t *testing.T, a *TestAgent, rules string) string { } req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - require.NotNil(t, obj) - aclResp := obj.(*structs.ACLToken) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + aclResp := &structs.ACLToken{} + if err := dec.Decode(aclResp); err != nil { + t.Fatalf("err: %v", err) + } return aclResp.SecretID } @@ -3926,10 +3941,14 @@ func testCreatePolicy(t *testing.T, a *TestAgent, name, rules string) string { } req, _ := http.NewRequest("PUT", "/v1/acl/policy?token=root", 
jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) - require.NotNil(t, obj) - aclResp := obj.(*structs.ACLPolicy) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + dec := json.NewDecoder(resp.Body) + aclResp := &structs.ACLPolicy{} + if err := dec.Decode(aclResp); err != nil { + t.Fatalf("err: %v", err) + } return aclResp.ID } @@ -4361,15 +4380,11 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) + a.srv.h.ServeHTTP(resp, req) if tt.wantErr != "" { - require.Error(err, "response code=%d, body:\n%s", - resp.Code, resp.Body.String()) - require.Contains(strings.ToLower(err.Error()), strings.ToLower(tt.wantErr)) + require.Contains(strings.ToLower(resp.Body.String()), strings.ToLower(tt.wantErr)) return } - require.NoError(err) - assert.Nil(obj) require.Equal(200, resp.Code, "request failed with body: %s", resp.Body.String()) @@ -4378,7 +4393,7 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s // Parse the expected definition into a ServiceDefinition var sd structs.ServiceDefinition - err = json.Unmarshal([]byte(tt.json), &sd) + err := json.Unmarshal([]byte(tt.json), &sd) require.NoError(err) require.NotEmpty(sd.Name) @@ -4419,9 +4434,8 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s req := httptest.NewRequest("PUT", "/v1/agent/service/deregister/"+svcID+"?token="+token, nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentDeregisterService(resp, req) - require.NoError(err) - require.Nil(obj) + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) svcs := a.State.AllServices() _, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)] @@ -4474,9 +4488,7 @@ func testAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T, extraH req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - assert.Nil(err) - assert.Nil(obj) + a.srv.h.ServeHTTP(resp, req) assert.Equal(http.StatusBadRequest, resp.Code) assert.Contains(resp.Body.String(), "Port") @@ -4524,9 +4536,8 @@ func testAgent_RegisterService_ConnectNative(t *testing.T, extraHCL string) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - obj, err := a.srv.AgentRegisterService(resp, req) - assert.Nil(err) - assert.Nil(obj) + a.srv.h.ServeHTTP(resp, req) + assert.Equal(http.StatusOK, resp.Code) // Ensure the service svc := a.State.Service(structs.NewServiceID("web", nil)) @@ -4572,13 +4583,13 @@ func testAgent_RegisterService_ScriptCheck_ExecDisable(t *testing.T, extraHCL st }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - _, err := a.srv.AgentRegisterService(nil, req) - if err == nil { - t.Fatalf("expected error but got nil") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusInternalServerError != resp.Code { + t.Fatalf("expected 500 but got %v", resp.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(resp.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected 
script disabled error, got: %s", resp.Body.String()) } checkID := types.CheckID("test-check") require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") @@ -4624,13 +4635,13 @@ func testAgent_RegisterService_ScriptCheck_ExecRemoteDisable(t *testing.T, extra }, } req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) - - _, err := a.srv.AgentRegisterService(nil, req) - if err == nil { - t.Fatalf("expected error but got nil") + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusInternalServerError != resp.Code { + t.Fatalf("expected 500 but got %v", resp.Code) } - if !strings.Contains(err.Error(), "Scripts are disabled on this agent") { - t.Fatalf("expected script disabled error, got: %s", err) + if !strings.Contains(resp.Body.String(), "Scripts are disabled on this agent") { + t.Fatalf("expected script disabled error, got: %s", resp.Body.String()) } checkID := types.CheckID("test-check") require.Nil(t, a.State.Check(structs.NewCheckID(checkID, nil)), "check registered with exec disabled") @@ -4646,21 +4657,23 @@ func TestAgent_DeregisterService(t *testing.T) { defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil) - obj, err := a.srv.AgentDeregisterService(nil, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("bad: %v", obj) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } // Ensure we have a check mapping @@ -4678,26 +4691,30 @@ func TestAgent_DeregisterService_ACLDeny(t *testing.T) { defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test", nil) - if _, err := a.srv.AgentDeregisterService(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/deregister/test?token=root", nil) - if _, err := a.srv.AgentDeregisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4714,9 +4731,7 @@ func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) { t.Run("not enabled", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test", nil) resp := 
httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4725,9 +4740,7 @@ func TestAgent_ServiceMaintenance_BadRequest(t *testing.T) { t.Run("no service id", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/?enable=true", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4753,20 +4766,22 @@ func TestAgent_ServiceMaintenance_Enable(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") // Register the service - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) // Force the service into maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4800,13 +4815,17 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { testrpc.WaitForTestAgent(t, a.RPC, "dc1") // Register the service - service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) // Force the service into maintenance mode if err := a.EnableServiceMaintenance(structs.NewServiceID("test", nil), "", ""); err != nil { @@ -4816,9 +4835,7 @@ func TestAgent_ServiceMaintenance_Disable(t *testing.T) { // Leave maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=false", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentServiceMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4841,26 +4858,30 @@ func TestAgent_ServiceMaintenance_ACLDeny(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") // Register the service. 
- service := &structs.NodeService{ - ID: "test", - Service: "test", - } - if err := a.addServiceFromSource(service, nil, false, "", ConfigSourceLocal); err != nil { - t.Fatalf("err: %v", err) + serviceReq := AddServiceRequest{ + Service: &structs.NodeService{ + ID: "test", + Service: "test", + }, + chkTypes: nil, + persist: false, + token: "", + Source: ConfigSourceLocal, } + require.NoError(t, a.AddService(serviceReq)) t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil) - if _, err := a.srv.AgentServiceMaintenance(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=root", nil) - if _, err := a.srv.AgentServiceMaintenance(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4877,9 +4898,7 @@ func TestAgent_NodeMaintenance_BadRequest(t *testing.T) { // Fails when no enable flag provided req, _ := http.NewRequest("PUT", "/v1/agent/maintenance", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 400 { t.Fatalf("expected 400, got %d", resp.Code) } @@ -4898,9 +4917,7 @@ func TestAgent_NodeMaintenance_Enable(t *testing.T) { // Force the node into maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken&token=mytoken", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4938,9 +4955,7 @@ func TestAgent_NodeMaintenance_Disable(t *testing.T) { // Leave maintenance mode req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=false", nil) resp := httptest.NewRecorder() - if _, err := a.srv.AgentNodeMaintenance(resp, req); err != nil { - t.Fatalf("err: %s", err) - } + a.srv.h.ServeHTTP(resp, req) if resp.Code != 200 { t.Fatalf("expected 200, got %d", resp.Code) } @@ -4963,16 +4978,16 @@ func TestAgent_NodeMaintenance_ACLDeny(t *testing.T) { t.Run("no token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken", nil) - if _, err := a.srv.AgentNodeMaintenance(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusForbidden, resp.Code) }) t.Run("root token", func(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/maintenance?enable=true&reason=broken&token=root", nil) - if _, err := a.srv.AgentNodeMaintenance(nil, req); err != nil { - t.Fatalf("err: %v", err) - } + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) }) } @@ -4996,8 +5011,10 @@ func TestAgent_RegisterCheck_Service(t *testing.T) { // First register the service req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, req); err != nil { - t.Fatalf("err: %v", err) + resp := httptest.NewRecorder() + 
a.srv.h.ServeHTTP(resp, req) + if resp.Code != 200 { + t.Fatalf("expected 200, got %d", resp.Code) } // Now register an additional check @@ -5007,8 +5024,10 @@ func TestAgent_RegisterCheck_Service(t *testing.T) { TTL: 15 * time.Second, } req, _ = http.NewRequest("PUT", "/v1/agent/check/register", jsonReader(checkArgs)) - if _, err := a.srv.AgentRegisterCheck(nil, req); err != nil { - t.Fatalf("err: %v", err) + resp = httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if resp.Code != 200 { + t.Fatalf("expected 200, got %d", resp.Code) } // Ensure we have a check mapping @@ -5045,20 +5064,14 @@ func TestAgent_Monitor(t *testing.T) { // Try passing an invalid log level req, _ := http.NewRequest("GET", "/v1/agent/monitor?loglevel=invalid", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentMonitor(resp, req) - if err == nil { - t.Fatal("expected BadRequestError to have occurred, got nil") - } - - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - if _, ok := err.(BadRequestError); !ok { - t.Fatalf("expected BadRequestError to have occurred, got %#v", err) + a.srv.h.ServeHTTP(resp, req) + if http.StatusBadRequest != resp.Code { + t.Fatalf("expected 400 but got %v", resp.Code) } substring := "Unknown log level" - if !strings.Contains(err.Error(), substring) { - t.Fatalf("got: %s, wanted message containing: %s", err.Error(), substring) + if !strings.Contains(resp.Body.String(), substring) { + t.Fatalf("got: %s, wanted message containing: %s", resp.Body.String(), substring) } }) @@ -5070,10 +5083,10 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - errCh := make(chan error) + codeCh := make(chan int) go func() { - _, err := a.srv.AgentMonitor(resp, req) - errCh <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() args := &structs.ServiceDefinition{ @@ -5085,8 +5098,10 @@ func TestAgent_Monitor(t *testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -5095,9 +5110,8 @@ func TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-errCh - require.NoError(t, err) - + code := <-codeCh + require.Equal(t, http.StatusOK, code) got := resp.Body.String() // Only check a substring that we are highly confident in finding @@ -5134,8 +5148,10 @@ func TestAgent_Monitor(t *testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -5154,10 +5170,10 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - errCh := make(chan error) + codeCh := make(chan int) go func() { - _, err := a.srv.AgentMonitor(resp, req) - errCh <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() args := &structs.ServiceDefinition{ @@ -5169,8 +5185,10 @@ func TestAgent_Monitor(t 
*testing.T) { } registerReq, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) - if _, err := a.srv.AgentRegisterService(nil, registerReq); err != nil { - t.Fatalf("err: %v", err) + res := httptest.NewRecorder() + a.srv.h.ServeHTTP(res, registerReq) + if http.StatusOK != res.Code { + t.Fatalf("expected 200 but got %v", res.Code) } // Wait until we have received some type of logging output @@ -5179,8 +5197,8 @@ func TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-errCh - require.NoError(t, err) + code := <-codeCh + require.Equal(t, http.StatusOK, code) // Each line is output as a separate JSON object, we grab the first and // make sure it can be unmarshalled. @@ -5199,12 +5217,12 @@ func TestAgent_Monitor(t *testing.T) { req = req.WithContext(cancelCtx) resp := httptest.NewRecorder() - chErr := make(chan error) + codeCh := make(chan int) chStarted := make(chan struct{}) go func() { close(chStarted) - _, err := a.srv.AgentMonitor(resp, req) - chErr <- err + a.srv.h.ServeHTTP(resp, req) + codeCh <- resp.Code }() <-chStarted @@ -5216,8 +5234,8 @@ func TestAgent_Monitor(t *testing.T) { }, 3*time.Second, 100*time.Millisecond) cancelFunc() - err := <-chErr - require.NoError(t, err) + code := <-codeCh + require.Equal(t, http.StatusOK, code) got := resp.Body.String() want := "serf: Shutdown without a Leave" @@ -5239,8 +5257,10 @@ func TestAgent_Monitor_ACLDeny(t *testing.T) { // Try without a token. req, _ := http.NewRequest("GET", "/v1/agent/monitor", nil) - if _, err := a.srv.AgentMonitor(nil, req); !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if http.StatusForbidden != resp.Code { + t.Fatalf("expected 403 but got %v", resp.Code) } // This proves we call the ACL function, and we've got the other monitor @@ -5270,11 +5290,12 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { require.NoError(t, err) resp := httptest.NewRecorder() - obj, err := a.srv.ACLPolicyCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) - policy, ok := obj.(*structs.ACLPolicy) - require.True(t, ok) + dec := json.NewDecoder(resp.Body) + policy = &structs.ACLPolicy{} + require.NoError(t, dec.Decode(policy)) return policy } @@ -5292,11 +5313,12 @@ func TestAgent_TokenTriggersFullSync(t *testing.T) { require.NoError(t, err) resp := httptest.NewRecorder() - obj, err := a.srv.ACLTokenCreate(resp, req) - require.NoError(t, err) + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) - token, ok := obj.(*structs.ACLToken) - require.True(t, ok) + dec := json.NewDecoder(resp.Body) + token = &structs.ACLToken{} + require.NoError(t, dec.Decode(token)) return token } @@ -5674,9 +5696,9 @@ func TestAgentConnectCARoots_empty(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCARoots(resp, req) - require.Error(err) - require.Contains(err.Error(), "Connect must be enabled") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusInternalServerError, resp.Code) + require.Contains(resp.Body.String(), "Connect must be enabled") } func TestAgentConnectCARoots_list(t *testing.T) { @@ -5699,10 +5721,12 @@ func TestAgentConnectCARoots_list(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCARoots(resp, 
req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + value := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value)) - value := obj.(structs.IndexedCARoots) assert.Equal(value.ActiveRootID, ca2.ID) // Would like to assert that it's the same as the TestAgent domain but the // only way to access that state via this package is by RPC to the server @@ -5722,9 +5746,12 @@ func TestAgentConnectCARoots_list(t *testing.T) { { // List it again resp2 := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCARoots(resp2, req) - require.NoError(err) - assert.Equal(obj, obj2) + a.srv.h.ServeHTTP(resp2, req) + + dec := json.NewDecoder(resp2.Body) + value2 := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value2)) + assert.Equal(value, value2) // Should cache hit this time and not make request assert.Equal("HIT", resp2.Header().Get("X-Cache")) @@ -5738,10 +5765,11 @@ func TestAgentConnectCARoots_list(t *testing.T) { retry.Run(t, func(r *retry.R) { // List it again resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCARoots(resp, req) - r.Check(err) + a.srv.h.ServeHTTP(resp, req) - value := obj.(structs.IndexedCARoots) + dec := json.NewDecoder(resp.Body) + value := &structs.IndexedCARoots{} + require.NoError(dec.Decode(value)) if ca.ID != value.ActiveRootID { r.Fatalf("%s != %s", ca.ID, value.ActiveRootID) } @@ -5789,16 +5817,14 @@ func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCALeafCert(resp, req) - require.Error(err) - require.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusForbidden, resp.Code) } func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { @@ -5829,8 +5855,7 @@ func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } @@ -5838,12 +5863,13 @@ func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) // Get the issued cert - _, ok := obj.(*structs.IssuedCert) - require.True(ok) + dec := json.NewDecoder(resp.Body) + value := &structs.IssuedCert{} + require.NoError(dec.Decode(value)) + require.NotNil(value) } func createACLTokenWithServicePolicy(t *testing.T, srv *HTTPHandlers, policy string) string { @@ -5864,10 +5890,11 @@ func createACLTokenWithServicePolicy(t *testing.T, srv *HTTPHandlers, policy str req, _ = http.NewRequest("PUT", "/v1/acl/token?token=root", jsonReader(tokenReq)) resp = httptest.NewRecorder() - tokInf, err := srv.ACLTokenCreate(resp, req) - require.NoError(t, err) - svcToken, ok := tokInf.(*structs.ACLToken) - require.True(t, ok) + srv.h.ServeHTTP(resp, req) + + dec := json.NewDecoder(resp.Body) + 
svcToken := &structs.ACLToken{} + require.NoError(t, dec.Decode(svcToken)) return svcToken.SecretID } @@ -5899,8 +5926,7 @@ func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } @@ -5908,9 +5934,8 @@ func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectCALeafCert(resp, req) - require.Error(err) - require.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusForbidden, resp.Code) } func TestAgentConnectCALeafCert_good(t *testing.T) { @@ -5948,8 +5973,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -5958,13 +5982,13 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -5980,9 +6004,11 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Set a new CA @@ -5992,9 +6018,10 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil) - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - issued2 := obj.(*structs.IssuedCert) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) require.NotEqual(issued.CertPEM, issued2.CertPEM) require.NotEqual(issued.PrivateKeyPEM, issued2.PrivateKeyPEM) @@ -6091,8 +6118,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.CatalogRegister(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -6101,13 +6127,13 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + 
a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -6121,9 +6147,11 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test Blocking - see https://github.com/hashicorp/consul/issues/4462 @@ -6133,7 +6161,7 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { blockingReq, _ := http.NewRequest("GET", fmt.Sprintf("/v1/agent/connect/ca/leaf/test?wait=125ms&index=%d", issued.ModifyIndex), nil) doneCh := make(chan struct{}) go func() { - a.srv.AgentConnectCALeafCert(resp, blockingReq) + a.srv.h.ServeHTTP(resp, blockingReq) close(doneCh) }() @@ -6154,10 +6182,11 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { resp := httptest.NewRecorder() // Try and sign again (note no index/wait arg since cache should update in // background even if we aren't actively blocking) - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - r.Check(err) + a.srv.h.ServeHTTP(resp, req) - issued2 := obj.(*structs.IssuedCert) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) if issued.CertPEM == issued2.CertPEM { r.Fatalf("leaf has not updated") } @@ -6233,8 +6262,7 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentRegisterService(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -6243,13 +6271,13 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, ca1) @@ -6263,9 +6291,11 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) + a.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test that we aren't churning leaves for no reason at idle. 
@@ -6274,11 +6304,17 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) go func() { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil) resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCALeafCert(resp, req) - if err != nil { + a.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + ch <- fmt.Errorf(resp.Body.String()) + return + } + + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + if err := dec.Decode(issued2); err != nil { ch <- err } else { - issued2 := obj.(*structs.IssuedCert) if issued.CertPEM == issued2.CertPEM { ch <- fmt.Errorf("leaf woke up unexpectedly with same cert") } else { @@ -6288,7 +6324,6 @@ func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) }() start := time.Now() - select { case <-time.After(5 * time.Second): case err := <-ch: @@ -6364,8 +6399,7 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { } req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() - _, err := a2.srv.AgentRegisterService(resp, req) - require.NoError(err) + a2.srv.h.ServeHTTP(resp, req) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -6375,13 +6409,14 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { req, err := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) require.NoError(err) resp := httptest.NewRecorder() - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) + a2.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) require.Equal("MISS", resp.Header().Get("X-Cache")) // Get the issued cert - issued, ok := obj.(*structs.IssuedCert) - assert.True(ok) + dec := json.NewDecoder(resp.Body) + issued := &structs.IssuedCert{} + require.NoError(dec.Decode(issued)) // Verify that the cert is signed by the CA requireLeafValidUnderCA(t, issued, dc1_ca1) @@ -6395,9 +6430,11 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { { // Fetch it again resp := httptest.NewRecorder() - obj2, err := a2.srv.AgentConnectCALeafCert(resp, req) - require.NoError(err) - require.Equal(obj, obj2) + a2.srv.h.ServeHTTP(resp, req) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) + require.Equal(issued, issued2) } // Test that we aren't churning leaves for no reason at idle. 
@@ -6406,11 +6443,17 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { go func() { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+strconv.Itoa(int(issued.ModifyIndex)), nil) resp := httptest.NewRecorder() - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) - if err != nil { + a2.srv.h.ServeHTTP(resp, req) + if resp.Code != http.StatusOK { + ch <- fmt.Errorf(resp.Body.String()) + return + } + + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + if err := dec.Decode(issued2); err != nil { ch <- err } else { - issued2 := obj.(*structs.IssuedCert) if issued.CertPEM == issued2.CertPEM { ch <- fmt.Errorf("leaf woke up unexpectedly with same cert") } else { @@ -6445,10 +6488,12 @@ func TestAgentConnectCALeafCert_secondaryDC_good(t *testing.T) { resp := httptest.NewRecorder() // Try and sign again (note no index/wait arg since cache should update in // background even if we aren't actively blocking) - obj, err := a2.srv.AgentConnectCALeafCert(resp, req) - r.Check(err) + a2.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusOK, resp.Code) - issued2 := obj.(*structs.IssuedCert) + dec := json.NewDecoder(resp.Body) + issued2 := &structs.IssuedCert{} + require.NoError(dec.Decode(issued2)) if issued.CertPEM == issued2.CertPEM { r.Fatalf("leaf has not updated") } @@ -6470,15 +6515,14 @@ func waitForActiveCARoot(t *testing.T, srv *HTTPHandlers, expect *structs.CARoot retry.Run(t, func(r *retry.R) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() - obj, err := srv.AgentConnectCARoots(resp, req) - if err != nil { - r.Fatalf("err: %v", err) + srv.h.ServeHTTP(resp, req) + if http.StatusOK != resp.Code { + t.Fatalf("expected 200 but got %v", resp.Code) } - roots, ok := obj.(structs.IndexedCARoots) - if !ok { - r.Fatalf("response is wrong type %T", obj) - } + dec := json.NewDecoder(resp.Body) + roots := &structs.IndexedCARoots{} + require.NoError(t, dec.Decode(roots)) var root *structs.CARoot for _, r := range roots.Roots { @@ -6530,12 +6574,9 @@ func TestAgentConnectAuthorize_badBody(t *testing.T) { args := []string{} req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - assert.Contains(err.Error(), "decode failed") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "decode failed") } func TestAgentConnectAuthorize_noTarget(t *testing.T) { @@ -6554,12 +6595,9 @@ func TestAgentConnectAuthorize_noTarget(t *testing.T) { args := &structs.ConnectAuthorizeRequest{} req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. 
- assert.Contains(err.Error(), "Target service must be specified") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "Target service must be specified") } // Client ID is not in the valid URI format @@ -6582,12 +6620,9 @@ func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - assert.Contains(err.Error(), "ClientCertURI not a valid Connect identifier") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "ClientCertURI not a valid Connect identifier") } // Client ID is a valid URI but its not a service URI @@ -6610,12 +6645,9 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Error(err) - assert.Nil(respRaw) - // Note that BadRequestError is handled outside the endpoint handler so we - // still see a 200 if we check here. - assert.Contains(err.Error(), "ClientCertURI not a valid Service identifier") + a.srv.h.ServeHTTP(resp, req) + require.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "ClientCertURI not a valid Service identifier") } // Test when there is an intention allowing the connection @@ -6656,12 +6688,13 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) require.Equal("MISS", resp.Header().Get("X-Cache")) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6669,11 +6702,12 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6705,11 +6739,12 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.Nil(err) + a.srv.h.ServeHTTP(resp, req) require.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.False(obj.Authorized) require.Contains(obj.Reason, "Matched") @@ -6757,11 +6792,12 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", 
jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6812,11 +6848,12 @@ func TestAgentConnectAuthorize_allowTrustDomain(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") } @@ -6879,11 +6916,12 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) assert.True(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6896,11 +6934,12 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - require.NoError(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Matched") } @@ -6928,8 +6967,9 @@ func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token="+token, jsonReader(args)) resp := httptest.NewRecorder() - _, err := a.srv.AgentConnectAuthorize(resp, req) - assert.True(acl.IsErrPermissionDenied(err)) + a.srv.h.ServeHTTP(resp, req) + + assert.Equal(http.StatusForbidden, resp.Code) } // Test when no intentions match w/ a default deny policy @@ -6951,11 +6991,12 @@ func TestAgentConnectAuthorize_defaultDeny(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := &connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.False(obj.Authorized) assert.Contains(obj.Reason, "Default behavior") } @@ -6986,12 +7027,12 @@ func TestAgentConnectAuthorize_defaultAllow(t *testing.T) { } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) + a.srv.h.ServeHTTP(resp, req) assert.Equal(200, resp.Code) - assert.NotNil(respRaw) - obj := respRaw.(*connectAuthorizeResp) + dec := json.NewDecoder(resp.Body) + obj := 
&connectAuthorizeResp{} + require.NoError(t, dec.Decode(obj)) assert.True(obj.Authorized) assert.Contains(obj.Reason, "Default behavior") } @@ -7017,6 +7058,7 @@ func TestAgent_Host(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil) resp := httptest.NewRecorder() + // TODO: AgentHost should write to response so that we can test using ServeHTTP() respRaw, err := a.srv.AgentHost(resp, req) assert.Nil(err) assert.Equal(http.StatusOK, resp.Code) @@ -7048,10 +7090,10 @@ func TestAgent_HostBadACL(t *testing.T) { testrpc.WaitForLeader(t, a.RPC, "dc1") req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil) resp := httptest.NewRecorder() - respRaw, err := a.srv.AgentHost(resp, req) + // TODO: AgentHost should write to response so that we can test using ServeHTTP() + _, err := a.srv.AgentHost(resp, req) assert.EqualError(err, "ACL not found") assert.Equal(http.StatusOK, resp.Code) - assert.Nil(respRaw) } // Thie tests that a proxy with an ExposeConfig is returned as expected. @@ -7088,12 +7130,19 @@ func TestAgent_Services_ExposeConfig(t *testing.T) { a.State.AddService(srv1, "") req, _ := http.NewRequest("GET", "/v1/agent/services", nil) - obj, err := a.srv.AgentServices(httptest.NewRecorder(), req) - require.NoError(t, err) - val := obj.(map[string]*api.AgentService) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + dec := json.NewDecoder(resp.Body) + val := make(map[string]*api.AgentService) + require.NoError(t, dec.Decode(&val)) require.Len(t, val, 1) actual := val["proxy-id"] require.NotNil(t, actual) require.Equal(t, api.ServiceKindConnectProxy, actual.Kind) + // Proxy.ToAPI() creates an empty Upstream list instead of keeping nil so do the same with actual. + if actual.Proxy.Upstreams == nil { + actual.Proxy.Upstreams = make([]api.Upstream, 0) + } require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy) } From b74ddd7b7017c848337a9878e71294cf81334bcf Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Tue, 7 Dec 2021 10:18:28 -0500 Subject: [PATCH 53/60] Godocs updates for catalog endpoints (#11716) --- agent/consul/catalog_endpoint.go | 163 +++++++++++++++++-------------- agent/structs/structs.go | 12 ++- 2 files changed, 97 insertions(+), 78 deletions(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index b853b4aca..4de46558a 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -71,76 +71,8 @@ type Catalog struct { logger hclog.Logger } -// nodePreApply does the verification of a node before it is applied to Raft. -func nodePreApply(nodeName, nodeID string) error { - if nodeName == "" { - return fmt.Errorf("Must provide node") - } - if nodeID != "" { - if _, err := uuid.ParseUUID(nodeID); err != nil { - return fmt.Errorf("Bad node ID: %v", err) - } - } - - return nil -} - -func servicePreApply(service *structs.NodeService, authz acl.Authorizer, authzCtxFill func(*acl.AuthorizerContext)) error { - // Validate the service. This is in addition to the below since - // the above just hasn't been moved over yet. We should move it over - // in time. - if err := service.Validate(); err != nil { - return err - } - - // If no service id, but service name, use default - if service.ID == "" && service.Service != "" { - service.ID = service.Service - } - - // Verify ServiceName provided if ID. 
- if service.ID != "" && service.Service == "" { - return fmt.Errorf("Must provide service name with ID") - } - - // Check the service address here and in the agent endpoint - // since service registration isn't synchronous. - if ipaddr.IsAny(service.Address) { - return fmt.Errorf("Invalid service address") - } - - var authzContext acl.AuthorizerContext - authzCtxFill(&authzContext) - - // Apply the ACL policy if any. The 'consul' service is excluded - // since it is managed automatically internally (that behavior - // is going away after version 0.8). We check this same policy - // later if version 0.8 is enabled, so we can eventually just - // delete this and do all the ACL checks down there. - if service.Service != structs.ConsulServiceName { - if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow { - return acl.ErrPermissionDenied - } - } - - // Proxies must have write permission on their destination - if service.Kind == structs.ServiceKindConnectProxy { - if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { - return acl.ErrPermissionDenied - } - } - - return nil -} - -// checkPreApply does the verification of a check before it is applied to Raft. -func checkPreApply(check *structs.HealthCheck) { - if check.CheckID == "" && check.Name != "" { - check.CheckID = types.CheckID(check.Name) - } -} - -// Register is used register that a node is providing a given service. +// Register a service and/or check(s) in a node, creating the node if it doesn't exist. +// It is valid to pass no service or checks to simply create the node itself. func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error { if done, err := c.srv.ForwardRPC("Catalog.Register", args, reply); done { return err @@ -212,6 +144,75 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error return err } +// nodePreApply does the verification of a node before it is applied to Raft. +func nodePreApply(nodeName, nodeID string) error { + if nodeName == "" { + return fmt.Errorf("Must provide node") + } + if nodeID != "" { + if _, err := uuid.ParseUUID(nodeID); err != nil { + return fmt.Errorf("Bad node ID: %v", err) + } + } + + return nil +} + +func servicePreApply(service *structs.NodeService, authz acl.Authorizer, authzCtxFill func(*acl.AuthorizerContext)) error { + // Validate the service. This is in addition to the below since + // the above just hasn't been moved over yet. We should move it over + // in time. + if err := service.Validate(); err != nil { + return err + } + + // If no service id, but service name, use default + if service.ID == "" && service.Service != "" { + service.ID = service.Service + } + + // Verify ServiceName provided if ID. + if service.ID != "" && service.Service == "" { + return fmt.Errorf("Must provide service name with ID") + } + + // Check the service address here and in the agent endpoint + // since service registration isn't synchronous. + if ipaddr.IsAny(service.Address) { + return fmt.Errorf("Invalid service address") + } + + var authzContext acl.AuthorizerContext + authzCtxFill(&authzContext) + + // Apply the ACL policy if any. The 'consul' service is excluded + // since it is managed automatically internally (that behavior + // is going away after version 0.8). We check this same policy + // later if version 0.8 is enabled, so we can eventually just + // delete this and do all the ACL checks down there. 
+ if service.Service != structs.ConsulServiceName { + if authz.ServiceWrite(service.Service, &authzContext) != acl.Allow { + return acl.ErrPermissionDenied + } + } + + // Proxies must have write permission on their destination + if service.Kind == structs.ServiceKindConnectProxy { + if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow { + return acl.ErrPermissionDenied + } + } + + return nil +} + +// checkPreApply does the verification of a check before it is applied to Raft. +func checkPreApply(check *structs.HealthCheck) { + if check.CheckID == "" && check.Name != "" { + check.CheckID = types.CheckID(check.Name) + } +} + // vetRegisterWithACL applies the given ACL's policy to the catalog update and // determines if it is allowed. Since the catalog register request is so // dynamic, this is a pretty complex algorithm and was worth breaking out of the @@ -330,7 +331,13 @@ func vetRegisterWithACL( return nil } -// Deregister is used to remove a service registration for a given node. +// Deregister a service or check in a node, or the entire node itself. +// +// If a ServiceID is provided in the request, any associated Checks +// with that service are also deregistered. +// +// If a ServiceID or CheckID is not provided in the request, the entire +// node is deregistered. func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) error { if done, err := c.srv.ForwardRPC("Catalog.Deregister", args, reply); done { return err @@ -458,7 +465,7 @@ func (c *Catalog) ListDatacenters(args *structs.DatacentersRequest, reply *[]str return nil } -// ListNodes is used to query the nodes in a DC +// ListNodes is used to query the nodes in a DC. func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.IndexedNodes) error { if done, err := c.srv.ForwardRPC("Catalog.ListNodes", args, reply); done { return err @@ -509,7 +516,8 @@ func isUnmodified(opts structs.QueryOptions, index uint64) bool { return opts.AllowNotModifiedResponse && opts.MinQueryIndex > 0 && opts.MinQueryIndex == index } -// ListServices is used to query the services in a DC +// ListServices is used to query the services in a DC. +// Returns services as a map of service names to available tags. func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.IndexedServices) error { if done, err := c.srv.ForwardRPC("Catalog.ListServices", args, reply); done { return err @@ -552,6 +560,8 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I }) } +// ServiceList is used to query the services in a DC. +// Returns services as a list of ServiceNames. func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error { if done, err := c.srv.ForwardRPC("Catalog.ServiceList", args, reply); done { return err @@ -581,7 +591,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In }) } -// ServiceNodes returns all the nodes registered as part of a service +// ServiceNodes returns all the nodes registered as part of a service. 
func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceNodes) error { if done, err := c.srv.ForwardRPC("Catalog.ServiceNodes", args, reply); done { return err @@ -721,7 +731,8 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru return err } -// NodeServices returns all the services registered as part of a node +// NodeServices returns all the services registered as part of a node. +// Returns NodeServices as a map of service IDs to services. func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServices) error { if done, err := c.srv.ForwardRPC("Catalog.NodeServices", args, reply); done { return err @@ -776,6 +787,8 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs }) } +// NodeServiceList returns all the services registered as part of a node. +// Returns NodeServices as a list of services. func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *structs.IndexedNodeServiceList) error { if done, err := c.srv.ForwardRPC("Catalog.NodeServiceList", args, reply); done { return err diff --git a/agent/structs/structs.go b/agent/structs/structs.go index e79cf6ef9..cd7733b9b 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -463,9 +463,11 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { return false } -// DeregisterRequest is used for the Catalog.Deregister endpoint -// to deregister a node as providing a service. If no service is -// provided the entire node is deregistered. +// DeregisterRequest is used for the Catalog.Deregister endpoint to +// deregister a service, check, or node (only one should be provided). +// If ServiceID or CheckID are not provided, the entire node is deregistered. +// If a ServiceID is provided, any associated Checks with that service +// are also deregistered. type DeregisterRequest struct { Datacenter string Node string @@ -1488,11 +1490,15 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { } } +// NodeServices represents services provided by Node. +// Services is a map of service IDs to services. type NodeServices struct { Node *Node Services map[string]*NodeService } +// NodeServiceList represents services provided by Node. +// Services is a list of services. type NodeServiceList struct { Node *Node Services []*NodeService From f4b07b86ceefb29bdfd15daaf12f26f3dfab733f Mon Sep 17 00:00:00 2001 From: Melissa Kam Date: Tue, 7 Dec 2021 13:55:49 -0600 Subject: [PATCH 54/60] docs/nia: Fix typo in CTS CLI options --- website/content/docs/nia/cli/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/nia/cli/index.mdx b/website/content/docs/nia/cli/index.mdx index c216e2da7..24d52476f 100644 --- a/website/content/docs/nia/cli/index.mdx +++ b/website/content/docs/nia/cli/index.mdx @@ -86,7 +86,7 @@ Below are options that can be used across all commands: |----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------------| | `-port` | Integer value that specifies the port from which the Consul-Terraform-Sync daemon serves its API.
The value is prepended with `http://localhost:`, but you can specify a different scheme or address with the `-http-addr` if necessary. | Optional | `8558` |
| `-http-addr` | String value that specifies the address and port of the Consul-Terraform-Sync API. You can specify an IP or DNS address. Alternatively, you can specify a value using the `CTS_HTTP_ADDR` environment variable. | Optional | `http://localhost:8558` |
-| `-ssl-verify` | Boolean value that configures Consul-Terraform-Connect to enable verification for TLS/SSL connections to the API. This does not affect insecure HTTP connections. Alternatively, you can specify the value using the `CTS_SSL_VERIFY` environment variable. | Optional | `true` |
+| `-ssl-verify` | Boolean value that configures Consul-Terraform-Sync to enable verification for TLS/SSL connections to the API. This does not affect insecure HTTP connections. Alternatively, you can specify the value using the `CTS_SSL_VERIFY` environment variable. | Optional | `true` |
| `-ca-cert` | String value that specifies the path to a PEM-encoded certificate authority file that is used to verify TLS/SSL connections. Takes precedence over `-ca-path` if both are provided. Alternatively, you can specify the value using the `CTS_CACERT` environment variable. | Optional | `""` |
| `-ca-path` | String value that specifies the path to a directory containing a PEM-encoded certificate authority file that is used to verify TLS/SSL connections. Alternatively, you can specify the value using the `CTS_CAPATH` environment variable. | Optional | `""` |
| `-client-cert` | String value that specifies the path to a PEM-encoded client certificate that the Consul-Terraform-Sync API requires when [`verify_incoming`](/docs/nia/configuration#verify_incoming) is set to `true` on the API.
    Alternatively, you can specify the value using the `CTS_CLIENT_CERT` environment variable. | Optional | `""` | From b19c7f17efec7b617ee77c0679c882c96601465c Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Tue, 7 Dec 2021 19:59:38 +0000 Subject: [PATCH 55/60] Rename `Master` and `AgentMaster` fields in config protobuf (#11764) --- agent/auto-config/config_translate.go | 6 +- agent/auto-config/config_translate_test.go | 22 ++-- proto/pbconfig/config.pb.go | 145 +++++++++++---------- proto/pbconfig/config.proto | 4 +- 4 files changed, 87 insertions(+), 90 deletions(-) diff --git a/agent/auto-config/config_translate.go b/agent/auto-config/config_translate.go index 58721e021..0c8939c55 100644 --- a/agent/auto-config/config_translate.go +++ b/agent/auto-config/config_translate.go @@ -65,14 +65,12 @@ func translateConfig(c *pbconfig.Config) config.Config { } result.ACL.Tokens = config.Tokens{ + InitialManagement: stringPtrOrNil(t.InitialManagement), + AgentRecovery: stringPtrOrNil(t.AgentRecovery), Replication: stringPtrOrNil(t.Replication), Default: stringPtrOrNil(t.Default), Agent: stringPtrOrNil(t.Agent), ManagedServiceProvider: tokens, - DeprecatedTokens: config.DeprecatedTokens{ - Master: stringPtrOrNil(t.Master), - AgentMaster: stringPtrOrNil(t.AgentMaster), - }, } } } diff --git a/agent/auto-config/config_translate_test.go b/agent/auto-config/config_translate_test.go index b306778d2..c70ae2087 100644 --- a/agent/auto-config/config_translate_test.go +++ b/agent/auto-config/config_translate_test.go @@ -69,11 +69,11 @@ func TestTranslateConfig(t *testing.T) { EnableTokenPersistence: true, MSPDisableBootstrap: false, Tokens: &pbconfig.ACLTokens{ - Master: "99e7e490-6baf-43fc-9010-78b6aa9a6813", - Replication: "51308d40-465c-4ac6-a636-7c0747edec89", - AgentMaster: "e012e1ea-78a2-41cc-bc8b-231a44196f39", - Default: "8781a3f5-de46-4b45-83e1-c92f4cfd0332", - Agent: "ddb8f1b0-8a99-4032-b601-87926bce244e", + InitialManagement: "99e7e490-6baf-43fc-9010-78b6aa9a6813", + Replication: "51308d40-465c-4ac6-a636-7c0747edec89", + AgentRecovery: "e012e1ea-78a2-41cc-bc8b-231a44196f39", + Default: "8781a3f5-de46-4b45-83e1-c92f4cfd0332", + Agent: "ddb8f1b0-8a99-4032-b601-87926bce244e", ManagedServiceProvider: []*pbconfig.ACLServiceProviderToken{ { AccessorID: "23f37987-7b9e-4e5b-acae-dbc9bc137bae", @@ -129,19 +129,17 @@ func TestTranslateConfig(t *testing.T) { EnableKeyListPolicy: boolPointer(true), EnableTokenPersistence: boolPointer(true), Tokens: config.Tokens{ - Replication: stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"), - Default: stringPointer("8781a3f5-de46-4b45-83e1-c92f4cfd0332"), - Agent: stringPointer("ddb8f1b0-8a99-4032-b601-87926bce244e"), + InitialManagement: stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"), + AgentRecovery: stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"), + Replication: stringPointer("51308d40-465c-4ac6-a636-7c0747edec89"), + Default: stringPointer("8781a3f5-de46-4b45-83e1-c92f4cfd0332"), + Agent: stringPointer("ddb8f1b0-8a99-4032-b601-87926bce244e"), ManagedServiceProvider: []config.ServiceProviderToken{ { AccessorID: stringPointer("23f37987-7b9e-4e5b-acae-dbc9bc137bae"), SecretID: stringPointer("e28b820a-438e-4e2b-ad24-fe59e6a4914f"), }, }, - DeprecatedTokens: config.DeprecatedTokens{ - Master: stringPointer("99e7e490-6baf-43fc-9010-78b6aa9a6813"), - AgentMaster: stringPointer("e012e1ea-78a2-41cc-bc8b-231a44196f39"), - }, }, }, AutoEncrypt: config.AutoEncrypt{ diff --git a/proto/pbconfig/config.pb.go b/proto/pbconfig/config.pb.go index 901e147de..458fc5482 
100644 --- a/proto/pbconfig/config.pb.go +++ b/proto/pbconfig/config.pb.go @@ -461,9 +461,9 @@ func (m *ACL) GetMSPDisableBootstrap() bool { } type ACLTokens struct { - Master string `protobuf:"bytes,1,opt,name=Master,proto3" json:"Master,omitempty"` + InitialManagement string `protobuf:"bytes,1,opt,name=InitialManagement,proto3" json:"InitialManagement,omitempty"` Replication string `protobuf:"bytes,2,opt,name=Replication,proto3" json:"Replication,omitempty"` - AgentMaster string `protobuf:"bytes,3,opt,name=AgentMaster,proto3" json:"AgentMaster,omitempty"` + AgentRecovery string `protobuf:"bytes,3,opt,name=AgentRecovery,proto3" json:"AgentRecovery,omitempty"` Default string `protobuf:"bytes,4,opt,name=Default,proto3" json:"Default,omitempty"` Agent string `protobuf:"bytes,5,opt,name=Agent,proto3" json:"Agent,omitempty"` ManagedServiceProvider []*ACLServiceProviderToken `protobuf:"bytes,6,rep,name=ManagedServiceProvider,proto3" json:"ManagedServiceProvider,omitempty"` @@ -505,9 +505,9 @@ func (m *ACLTokens) XXX_DiscardUnknown() { var xxx_messageInfo_ACLTokens proto.InternalMessageInfo -func (m *ACLTokens) GetMaster() string { +func (m *ACLTokens) GetInitialManagement() string { if m != nil { - return m.Master + return m.InitialManagement } return "" } @@ -519,9 +519,9 @@ func (m *ACLTokens) GetReplication() string { return "" } -func (m *ACLTokens) GetAgentMaster() string { +func (m *ACLTokens) GetAgentRecovery() string { if m != nil { - return m.AgentMaster + return m.AgentRecovery } return "" } @@ -687,58 +687,59 @@ func init() { func init() { proto.RegisterFile("proto/pbconfig/config.proto", fileDescriptor_aefa824db7b74d77) } var fileDescriptor_aefa824db7b74d77 = []byte{ - // 811 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdb, 0x8e, 0xe3, 0x44, - 0x10, 0xc5, 0xe3, 0x1d, 0xcf, 0xa4, 0x02, 0xab, 0xdd, 0xde, 0x25, 0x58, 0x5c, 0x42, 0x64, 0xa1, - 0xd5, 0x80, 0xd0, 0x0c, 0x1a, 0x04, 0x02, 0xc4, 0x4b, 0x26, 0x59, 0x41, 0xd8, 0x24, 0x44, 0x76, - 0x58, 0x24, 0x5e, 0x90, 0xe3, 0x54, 0x92, 0x16, 0x4e, 0xb7, 0xd5, 0xee, 0xec, 0x28, 0x7f, 0xc2, - 0xbf, 0xf0, 0x03, 0xbc, 0xc1, 0x27, 0xc0, 0xf0, 0x03, 0x7c, 0x02, 0xea, 0x8b, 0x6f, 0x43, 0xf2, - 0x94, 0xd4, 0x39, 0xa7, 0xab, 0xab, 0xba, 0x2e, 0x86, 0x77, 0x32, 0xc1, 0x25, 0xbf, 0xca, 0x16, - 0x09, 0x67, 0x2b, 0xba, 0xbe, 0x32, 0x3f, 0x97, 0x1a, 0x25, 0x9e, 0xb1, 0x82, 0x3f, 0x4e, 0xc0, - 0x1b, 0xe8, 0xbf, 0xa4, 0x0b, 0x30, 0x8c, 0x65, 0x9c, 0x20, 0x93, 0x28, 0x7c, 0xa7, 0xe7, 0x5c, - 0xb4, 0xc2, 0x1a, 0x42, 0x3e, 0x86, 0xc7, 0x33, 0x41, 0xb7, 0xb1, 0xd8, 0xd7, 0x64, 0x27, 0x5a, - 0xf6, 0x7f, 0x82, 0xbc, 0x0d, 0xe7, 0x53, 0xbe, 0xc4, 0x69, 0xbc, 0x45, 0xdf, 0xd5, 0xa2, 0xd2, - 0x26, 0x3d, 0x68, 0x47, 0xb8, 0xde, 0x22, 0x93, 0x9a, 0x7e, 0xa0, 0xe9, 0x3a, 0x44, 0xde, 0x85, - 0xd6, 0x2c, 0x16, 0x92, 0x4a, 0xca, 0x99, 0xdf, 0xd2, 0x7c, 0x05, 0x90, 0xf7, 0xc0, 0xed, 0x0f, - 0xc6, 0xfe, 0x69, 0xcf, 0xb9, 0x68, 0x5f, 0xb7, 0x2f, 0x6d, 0x62, 0xfd, 0xc1, 0x38, 0x54, 0x38, - 0xf9, 0x0c, 0xda, 0xfd, 0x9d, 0xe4, 0xcf, 0x59, 0x22, 0xf6, 0x99, 0xf4, 0x3d, 0x2d, 0x7b, 0x52, - 0xca, 0x2a, 0x2a, 0xac, 0xeb, 0xc8, 0x33, 0xf0, 0xbe, 0xe1, 0x79, 0x4e, 0x33, 0xff, 0x4c, 0x9f, - 0x78, 0x58, 0x9c, 0x30, 0x68, 0x68, 0x59, 0x75, 0xfb, 0x7c, 0x1c, 0xf9, 0xe7, 0xcd, 0xdb, 0xe7, - 0xe3, 0x28, 0x54, 0x78, 0xb0, 0x2a, 0xdc, 0x90, 0x2f, 0x00, 0xac, 0x6f, 0x95, 0x85, 0xa3, 0xf5, - 0x7e, 0xd3, 0x69, 0xc5, 0x87, 0x35, 0x2d, 0x09, 0xe0, 0xf5, 0x10, 0xa5, 0xd8, 0x7f, 0xc7, 0x29, - 0x1b, 0xf7, 0xa7, 0xfe, 0x49, 0xcf, 
0xbd, 0x68, 0x85, 0x0d, 0x2c, 0x90, 0xf0, 0xe8, 0xbe, 0x0f, - 0xf2, 0x08, 0xdc, 0x17, 0xb8, 0xb7, 0xb5, 0x53, 0x7f, 0xc9, 0x33, 0x78, 0xf8, 0x12, 0x05, 0x5d, - 0xed, 0x47, 0x2c, 0xe1, 0x5b, 0xca, 0xd6, 0xba, 0x62, 0xe7, 0xe1, 0x3d, 0xb4, 0xd2, 0x7d, 0xbf, - 0x93, 0x6b, 0xae, 0x74, 0x6e, 0x5d, 0x57, 0xa0, 0xc1, 0xdf, 0x8e, 0xce, 0xfe, 0x80, 0xde, 0x39, - 0xa4, 0x27, 0xd7, 0xf0, 0xd4, 0x20, 0x11, 0x8a, 0x57, 0x28, 0xbe, 0xe5, 0xb9, 0x64, 0xaa, 0xe6, - 0x26, 0x8a, 0x83, 0x9c, 0xca, 0x7e, 0x40, 0xb3, 0x0d, 0x8a, 0x68, 0x47, 0x25, 0xe6, 0xb6, 0x7d, - 0x1a, 0x98, 0x6a, 0xd6, 0x09, 0x65, 0x2f, 0x51, 0xe4, 0xea, 0x6d, 0x4d, 0x07, 0xd5, 0x10, 0xf2, - 0x15, 0xf8, 0x33, 0x81, 0x2b, 0x14, 0xc6, 0x77, 0xc3, 0xdf, 0xa9, 0xbe, 0xfb, 0x28, 0x1f, 0xfc, - 0xe6, 0xea, 0xfe, 0x22, 0x3e, 0x9c, 0x3d, 0x67, 0xf1, 0x22, 0xc5, 0xa5, 0x4d, 0xae, 0x30, 0x75, - 0x7b, 0xf2, 0x94, 0x26, 0xfb, 0xf9, 0x7c, 0x6c, 0x47, 0xa0, 0x02, 0xd4, 0xb9, 0x90, 0xa7, 0xa8, - 0x38, 0x13, 0x7a, 0x61, 0xaa, 0xa1, 0x98, 0xf3, 0x5f, 0x90, 0x29, 0xca, 0xc4, 0x5c, 0xda, 0x7a, - 0xfc, 0xf8, 0x2d, 0x33, 0x6e, 0x74, 0x8c, 0x6a, 0xfc, 0x4a, 0x84, 0x7c, 0x00, 0x6f, 0x0c, 0x71, - 0x15, 0xef, 0x52, 0x69, 0x25, 0x9e, 0x96, 0x34, 0x41, 0xf2, 0x09, 0x3c, 0x31, 0x41, 0xbe, 0xc0, - 0xfd, 0x98, 0xe6, 0x85, 0xf6, 0x4c, 0xc7, 0x7f, 0x88, 0x22, 0x1f, 0x82, 0xa7, 0x63, 0xc8, 0x6d, - 0x47, 0x3f, 0xae, 0xcd, 0x93, 0x21, 0x42, 0x2b, 0x20, 0x5f, 0x42, 0x67, 0x88, 0x99, 0xc0, 0x24, - 0x96, 0xb8, 0xfc, 0x79, 0x48, 0x73, 0xfd, 0x1a, 0x2a, 0x19, 0x3d, 0xa2, 0x37, 0x27, 0xbe, 0x13, - 0xbe, 0x59, 0x29, 0x6a, 0x02, 0xf2, 0x39, 0x74, 0xcc, 0xe5, 0xda, 0xd5, 0x4c, 0x55, 0x29, 0x97, - 0xc8, 0x12, 0xf4, 0x41, 0x87, 0x76, 0x84, 0x55, 0xf9, 0x4c, 0xa2, 0x99, 0xf5, 0x74, 0xc3, 0xb9, - 0xcc, 0xa5, 0x88, 0x33, 0xbf, 0x6d, 0xf2, 0x39, 0x40, 0x05, 0xff, 0x3a, 0xd0, 0x2a, 0x43, 0x27, - 0x1d, 0xf0, 0x26, 0x71, 0x5e, 0x2d, 0x34, 0x6b, 0xa9, 0x15, 0x14, 0x62, 0x96, 0xd2, 0x24, 0xd6, - 0xc3, 0x69, 0x6a, 0x58, 0x87, 0x94, 0xa2, 0xbf, 0x46, 0x26, 0xed, 0x71, 0x53, 0xc9, 0x3a, 0xa4, - 0xea, 0x6c, 0x1f, 0xdf, 0x16, 0xb3, 0x30, 0xc9, 0x53, 0x38, 0xd5, 0x42, 0x5b, 0x46, 0x63, 0x90, - 0x1f, 0xa1, 0x33, 0x89, 0x59, 0xbc, 0xc6, 0xa5, 0x6a, 0x3a, 0x9a, 0xe0, 0x4c, 0xf0, 0x57, 0x74, - 0x89, 0xc2, 0xf7, 0x7a, 0xee, 0x45, 0xfb, 0xfa, 0xfd, 0xda, 0xcb, 0xdf, 0x53, 0xe8, 0x6c, 0xc2, - 0x23, 0xc7, 0x83, 0x1f, 0xe0, 0xad, 0x23, 0x47, 0x54, 0x57, 0xf5, 0x93, 0x04, 0xf3, 0x9c, 0x8b, - 0xd1, 0xb0, 0x58, 0xea, 0x15, 0xa2, 0x3a, 0x32, 0xc2, 0x44, 0xa0, 0x1c, 0x0d, 0xed, 0x23, 0x94, - 0x76, 0x40, 0x1b, 0x7b, 0x54, 0x2d, 0x17, 0xb5, 0xf7, 0xcc, 0x28, 0xe8, 0x25, 0xd0, 0x01, 0x6f, - 0x38, 0x8d, 0xa2, 0x72, 0x41, 0x59, 0x4b, 0xa5, 0x3f, 0x9a, 0x29, 0xd8, 0xd5, 0xb0, 0x31, 0xd4, - 0x55, 0xfd, 0x34, 0xe5, 0xb7, 0xca, 0xc9, 0x03, 0xed, 0xa4, 0xb4, 0x6f, 0xbe, 0xfe, 0xfd, 0xae, - 0xeb, 0xfc, 0x79, 0xd7, 0x75, 0xfe, 0xba, 0xeb, 0x3a, 0xbf, 0xfe, 0xd3, 0x7d, 0xed, 0xa7, 0x8f, - 0xd6, 0x54, 0x6e, 0x76, 0x8b, 0xcb, 0x84, 0x6f, 0xaf, 0x36, 0x71, 0xbe, 0xa1, 0x09, 0x17, 0x99, - 0xfa, 0x86, 0xe5, 0xbb, 0xf4, 0xaa, 0xf9, 0x65, 0x5b, 0x78, 0xda, 0xfe, 0xf4, 0xbf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x2f, 0xdd, 0x30, 0x50, 0xf2, 0x06, 0x00, 0x00, + // 823 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x55, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xc6, 0xf1, 0xd6, 0x6d, 0x26, 0xb0, 0xda, 0x9d, 0x5d, 0x8a, 0xc5, 0x4f, 0x88, 0x2c, 0xb4, + 0x2a, 0x08, 0xb5, 0xa8, 0x08, 0x04, 0x88, 0x9b, 0x34, 0x59, 0x41, 0xd8, 0x34, 0x44, 0x76, 0x58, + 0x24, 0x6e, 0x90, 0xe3, 0x9c, 0x24, 0x23, 
0x9c, 0x19, 0x6b, 0x3c, 0xe9, 0xca, 0xaf, 0xc0, 0x13, + 0xf0, 0x2e, 0xbc, 0x00, 0x77, 0xf0, 0x08, 0x50, 0x5e, 0x64, 0x75, 0x66, 0xc6, 0x8e, 0xdd, 0x26, + 0x57, 0xc9, 0xf9, 0xbe, 0x6f, 0xce, 0x9c, 0x33, 0xe7, 0xc7, 0xe4, 0xbd, 0x4c, 0x0a, 0x25, 0x2e, + 0xb2, 0x79, 0x22, 0xf8, 0x92, 0xad, 0x2e, 0xcc, 0xcf, 0xb9, 0x46, 0xa9, 0x67, 0xac, 0xe0, 0xef, + 0x16, 0xf1, 0x06, 0xfa, 0x2f, 0xed, 0x12, 0x32, 0x8c, 0x55, 0x9c, 0x00, 0x57, 0x20, 0x7d, 0xa7, + 0xe7, 0x9c, 0xb5, 0xc3, 0x1a, 0x42, 0x3f, 0x25, 0x8f, 0xa7, 0x92, 0x6d, 0x62, 0x59, 0xd4, 0x64, + 0x2d, 0x2d, 0xbb, 0x4f, 0xd0, 0x77, 0xc9, 0xc9, 0x44, 0x2c, 0x60, 0x12, 0x6f, 0xc0, 0x77, 0xb5, + 0xa8, 0xb2, 0x69, 0x8f, 0x74, 0x22, 0x58, 0x6d, 0x80, 0x2b, 0x4d, 0x3f, 0xd0, 0x74, 0x1d, 0xa2, + 0xef, 0x93, 0xf6, 0x34, 0x96, 0x8a, 0x29, 0x26, 0xb8, 0xdf, 0xd6, 0xfc, 0x0e, 0xa0, 0x1f, 0x10, + 0xb7, 0x3f, 0x18, 0xfb, 0x47, 0x3d, 0xe7, 0xac, 0x73, 0xd9, 0x39, 0xb7, 0x89, 0xf5, 0x07, 0xe3, + 0x10, 0x71, 0xfa, 0x05, 0xe9, 0xf4, 0xb7, 0x4a, 0x3c, 0xe7, 0x89, 0x2c, 0x32, 0xe5, 0x7b, 0x5a, + 0xf6, 0xa4, 0x92, 0xed, 0xa8, 0xb0, 0xae, 0xa3, 0xcf, 0x88, 0xf7, 0x9d, 0xc8, 0x73, 0x96, 0xf9, + 0xc7, 0xfa, 0xc4, 0xc3, 0xf2, 0x84, 0x41, 0x43, 0xcb, 0xe2, 0xed, 0xb3, 0x71, 0xe4, 0x9f, 0x34, + 0x6f, 0x9f, 0x8d, 0xa3, 0x10, 0xf1, 0x60, 0x59, 0xba, 0xa1, 0x5f, 0x11, 0x62, 0x7d, 0x63, 0x16, + 0x8e, 0xd6, 0xfb, 0x4d, 0xa7, 0x3b, 0x3e, 0xac, 0x69, 0x69, 0x40, 0xde, 0x0c, 0x41, 0xc9, 0xe2, + 0x07, 0xc1, 0xf8, 0xb8, 0x3f, 0xf1, 0x5b, 0x3d, 0xf7, 0xac, 0x1d, 0x36, 0xb0, 0x40, 0x91, 0x47, + 0x77, 0x7d, 0xd0, 0x47, 0xc4, 0x7d, 0x01, 0x85, 0xad, 0x1d, 0xfe, 0xa5, 0xcf, 0xc8, 0xc3, 0x97, + 0x20, 0xd9, 0xb2, 0x18, 0xf1, 0x44, 0x6c, 0x18, 0x5f, 0xe9, 0x8a, 0x9d, 0x84, 0x77, 0xd0, 0x9d, + 0xee, 0xc7, 0xad, 0x5a, 0x09, 0xd4, 0xb9, 0x75, 0x5d, 0x89, 0x06, 0xff, 0x39, 0x3a, 0xfb, 0x3d, + 0x7a, 0x67, 0x9f, 0x9e, 0x5e, 0x92, 0xa7, 0x06, 0x89, 0x40, 0xde, 0x80, 0xfc, 0x5e, 0xe4, 0x8a, + 0x63, 0xcd, 0x4d, 0x14, 0x7b, 0x39, 0xcc, 0x7e, 0xc0, 0xb2, 0x35, 0xc8, 0x68, 0xcb, 0x14, 0xe4, + 0xb6, 0x7d, 0x1a, 0x18, 0x36, 0xeb, 0x35, 0xe3, 0x2f, 0x41, 0xe6, 0xf8, 0xb6, 0xa6, 0x83, 0x6a, + 0x08, 0xfd, 0x86, 0xf8, 0x53, 0x09, 0x4b, 0x90, 0xc6, 0x77, 0xc3, 0xdf, 0x91, 0xbe, 0xfb, 0x20, + 0x1f, 0xfc, 0xe9, 0xea, 0xfe, 0xa2, 0x3e, 0x39, 0x7e, 0xce, 0xe3, 0x79, 0x0a, 0x0b, 0x9b, 0x5c, + 0x69, 0xea, 0xf6, 0x14, 0x29, 0x4b, 0x8a, 0xd9, 0x6c, 0x6c, 0x47, 0x60, 0x07, 0xe0, 0xb9, 0x50, + 0xa4, 0x80, 0x9c, 0x09, 0xbd, 0x34, 0x71, 0x28, 0x66, 0xe2, 0x37, 0xe0, 0x48, 0x99, 0x98, 0x2b, + 0x5b, 0x8f, 0x9f, 0x78, 0xc5, 0x8d, 0x1b, 0x1d, 0x23, 0x8e, 0x5f, 0x85, 0xd0, 0x8f, 0xc8, 0x5b, + 0x43, 0x58, 0xc6, 0xdb, 0x54, 0x59, 0x89, 0xa7, 0x25, 0x4d, 0x90, 0x7e, 0x46, 0x9e, 0x98, 0x20, + 0x5f, 0x40, 0x31, 0x66, 0x79, 0xa9, 0x3d, 0xd6, 0xf1, 0xef, 0xa3, 0xe8, 0xc7, 0xc4, 0xd3, 0x31, + 0xe4, 0xb6, 0xa3, 0x1f, 0xd7, 0xe6, 0xc9, 0x10, 0xa1, 0x15, 0xd0, 0xaf, 0xc9, 0xe9, 0x10, 0x32, + 0x09, 0x49, 0xac, 0x60, 0xf1, 0xeb, 0x90, 0xe5, 0xfa, 0x35, 0x30, 0x19, 0x3d, 0xa2, 0x57, 0x2d, + 0xdf, 0x09, 0xdf, 0xde, 0x29, 0x6a, 0x02, 0xfa, 0x25, 0x39, 0x35, 0x97, 0x6b, 0x57, 0x53, 0xac, + 0x52, 0xae, 0x80, 0x27, 0xe0, 0x13, 0x1d, 0xda, 0x01, 0x16, 0xf3, 0xb9, 0x8e, 0xa6, 0xd6, 0xd3, + 0x95, 0x10, 0x2a, 0x57, 0x32, 0xce, 0xfc, 0x8e, 0xc9, 0x67, 0x0f, 0x15, 0xfc, 0xde, 0x22, 0xed, + 0x2a, 0x74, 0x5c, 0x5a, 0x23, 0xce, 0x14, 0x8b, 0xd3, 0xeb, 0x98, 0xc7, 0x2b, 0xc0, 0x0d, 0x63, + 0xe7, 0xe3, 0x3e, 0x81, 0x8b, 0x29, 0x84, 0x2c, 0x65, 0x49, 0xac, 0x47, 0xd6, 0x54, 0xb6, 0x0e, + 0x61, 0x15, 0xfa, 0x2b, 0xe0, 0x2a, 0x84, 0x44, 0xdc, 0x80, 0x2c, 
0x6c, 0x85, 0x9b, 0x20, 0x76, + 0x80, 0x2d, 0x8b, 0x2d, 0x73, 0x69, 0xd2, 0xa7, 0xe4, 0x48, 0x4b, 0x6d, 0x81, 0x8d, 0x41, 0x7f, + 0x26, 0xa7, 0x26, 0x8a, 0x05, 0xb6, 0x23, 0x4b, 0x60, 0x2a, 0xc5, 0x0d, 0x5b, 0x80, 0xf4, 0xbd, + 0x9e, 0x7b, 0xd6, 0xb9, 0xfc, 0xb0, 0x56, 0x93, 0x3b, 0x0a, 0x9d, 0x67, 0x78, 0xe0, 0x78, 0xf0, + 0x13, 0x79, 0xe7, 0xc0, 0x11, 0xec, 0xb7, 0x7e, 0x92, 0x40, 0x9e, 0x0b, 0x39, 0x1a, 0x96, 0xeb, + 0x7e, 0x87, 0x60, 0xaf, 0x46, 0x90, 0x48, 0x50, 0xa3, 0xa1, 0x7d, 0x88, 0xca, 0x0e, 0x58, 0x63, + 0xc3, 0xe2, 0xda, 0xc1, 0x8d, 0x68, 0x86, 0x44, 0xaf, 0x87, 0x53, 0xe2, 0x0d, 0x27, 0x51, 0x54, + 0xad, 0x2e, 0x6b, 0x61, 0xfa, 0xa3, 0x29, 0xc2, 0xae, 0x86, 0x8d, 0x81, 0x57, 0xf5, 0xd3, 0x54, + 0xbc, 0x42, 0x27, 0x0f, 0xb4, 0x93, 0xca, 0xbe, 0xfa, 0xf6, 0xaf, 0xdb, 0xae, 0xf3, 0xcf, 0x6d, + 0xd7, 0xf9, 0xf7, 0xb6, 0xeb, 0xfc, 0xf1, 0x7f, 0xf7, 0x8d, 0x5f, 0x3e, 0x59, 0x31, 0xb5, 0xde, + 0xce, 0xcf, 0x13, 0xb1, 0xb9, 0x58, 0xc7, 0xf9, 0x9a, 0x25, 0x42, 0x66, 0xf8, 0x75, 0xcb, 0xb7, + 0xe9, 0x45, 0xf3, 0x9b, 0x37, 0xf7, 0xb4, 0xfd, 0xf9, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, + 0x66, 0xca, 0x39, 0x0c, 0x07, 0x00, 0x00, } func (m *Config) Marshal() (dAtA []byte, err error) { @@ -1197,10 +1198,10 @@ func (m *ACLTokens) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.AgentMaster) > 0 { - i -= len(m.AgentMaster) - copy(dAtA[i:], m.AgentMaster) - i = encodeVarintConfig(dAtA, i, uint64(len(m.AgentMaster))) + if len(m.AgentRecovery) > 0 { + i -= len(m.AgentRecovery) + copy(dAtA[i:], m.AgentRecovery) + i = encodeVarintConfig(dAtA, i, uint64(len(m.AgentRecovery))) i-- dAtA[i] = 0x1a } @@ -1211,10 +1212,10 @@ func (m *ACLTokens) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.Master) > 0 { - i -= len(m.Master) - copy(dAtA[i:], m.Master) - i = encodeVarintConfig(dAtA, i, uint64(len(m.Master))) + if len(m.InitialManagement) > 0 { + i -= len(m.InitialManagement) + copy(dAtA[i:], m.InitialManagement) + i = encodeVarintConfig(dAtA, i, uint64(len(m.InitialManagement))) i-- dAtA[i] = 0xa } @@ -1517,7 +1518,7 @@ func (m *ACLTokens) Size() (n int) { } var l int _ = l - l = len(m.Master) + l = len(m.InitialManagement) if l > 0 { n += 1 + l + sovConfig(uint64(l)) } @@ -1525,7 +1526,7 @@ func (m *ACLTokens) Size() (n int) { if l > 0 { n += 1 + l + sovConfig(uint64(l)) } - l = len(m.AgentMaster) + l = len(m.AgentRecovery) if l > 0 { n += 1 + l + sovConfig(uint64(l)) } @@ -2767,7 +2768,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialManagement", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2795,7 +2796,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Master = string(dAtA[iNdEx:postIndex]) + m.InitialManagement = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -2831,7 +2832,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentMaster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentRecovery", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2859,7 +2860,7 @@ func (m *ACLTokens) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - 
m.AgentMaster = string(dAtA[iNdEx:postIndex]) + m.AgentRecovery = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { diff --git a/proto/pbconfig/config.proto b/proto/pbconfig/config.proto index 19cb356e5..8483c9626 100644 --- a/proto/pbconfig/config.proto +++ b/proto/pbconfig/config.proto @@ -52,9 +52,9 @@ message ACL { } message ACLTokens { - string Master = 1; + string InitialManagement = 1; string Replication = 2; - string AgentMaster = 3; + string AgentRecovery = 3; string Default = 4; string Agent = 5; repeated ACLServiceProviderToken ManagedServiceProvider = 6; From d4bc9537196a54de300d71b4c576c76aad61e196 Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Tue, 7 Dec 2021 15:10:14 -0500 Subject: [PATCH 56/60] improve location of DNS alt domain docs section --- website/content/docs/discovery/dns.mdx | 82 +++++++++++++------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index 87c9145c1..ef1b8a8be 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -96,47 +96,6 @@ pairs according to [RFC1464](https://www.ietf.org/rfc/rfc1464.txt). Alternatively, the TXT record will only include the node's metadata value when the node's metadata key starts with `rfc1035-`. -## Alternative Domain - -By default, Consul responds to DNS queries in the `consul` domain, -but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/options#domain) parameter. - -In some instances, Consul may need to respond to queries in more than one domain, -such as during a DNS migration or to distinguish between internal and external queries. - -Consul versions 1.5.2+ can be configured to respond to DNS queries on an alternative domain -through the [`alt_domain`](/docs/agent/options#alt_domain) agent configuration -option. As of Consul versions 1.11.0+, Consul's DNS response will use the same domain as was used in the query; -in prior versions, the response may use the primary [`domain`](/docs/agent/options#domain) no matter which -domain was used in the query. - -In the following example, the `alt_domain` parameter is set to `test-domain`: - -```hcl - alt_domain = "test-domain" -``` - -```shell-session -$ dig @127.0.0.1 -p 8600 consul.service.test-domain SRV -``` -The following responses are returned: - -``` -;; QUESTION SECTION: -;consul.service.test-domain. IN SRV - -;; ANSWER SECTION: -consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain. - -;; ADDITIONAL SECTION: -machine.node.dc1.test-domain. 0 IN A 127.0.0.1 -machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" -``` - --> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the -[primary domain](/docs/agent/options#domain) (not the alternative domain), -as there is no way for the query to specify a domain. - ## Service Lookups A service lookup is used to query for service providers. Service queries support @@ -319,6 +278,47 @@ without setting the truncate bit. This is to prevent a redundant lookup over TCP that generates additional load. If the lookup is done over TCP, the results are not truncated. +## Alternative Domain + +By default, Consul responds to DNS queries in the `consul` domain, +but you can set a specific domain for responding to DNS queries by configuring the [`domain`](/docs/agent/options#domain) parameter. 
+ +In some instances, Consul may need to respond to queries in more than one domain, +such as during a DNS migration or to distinguish between internal and external queries. + +Consul versions 1.5.2+ can be configured to respond to DNS queries on an alternative domain +through the [`alt_domain`](/docs/agent/options#alt_domain) agent configuration +option. As of Consul versions 1.11.0+, Consul's DNS response will use the same domain as was used in the query; +in prior versions, the response may use the primary [`domain`](/docs/agent/options#domain) no matter which +domain was used in the query. + +In the following example, the `alt_domain` parameter is set to `test-domain`: + +```hcl + alt_domain = "test-domain" +``` + +```shell-session +$ dig @127.0.0.1 -p 8600 consul.service.test-domain SRV +``` +The following responses are returned: + +``` +;; QUESTION SECTION: +;consul.service.test-domain. IN SRV + +;; ANSWER SECTION: +consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain. + +;; ADDITIONAL SECTION: +machine.node.dc1.test-domain. 0 IN A 127.0.0.1 +machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" +``` + +-> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the +[primary domain](/docs/agent/options#domain) (not the alternative domain), +as there is no way for the query to specify a domain. + ## Caching By default, all DNS results served by Consul set a 0 TTL value. This disables From 2411a5f58f8286510a9e2bd46d2c593442560c17 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 7 Dec 2021 21:12:32 +0100 Subject: [PATCH 57/60] dependabot : add standard and hashicorp labels (#11676) --- .github/dependabot.yml | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8fdc6c8ff..6a0a7fd0f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,30 +3,55 @@ updates: - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/api" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: gomod open-pull-requests-limit: 5 directory: "/sdk" + labels: + - "go" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: npm open-pull-requests-limit: 5 directory: "/ui" + labels: + - "javascript" + - "dependencies" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: npm open-pull-requests-limit: 5 directory: "/website" + labels: + - "javascript" + - "dependencies" + - "type/docs-cherrypick" + - "pr/no-changelog" schedule: interval: daily - package-ecosystem: github-actions open-pull-requests-limit: 5 directory: / + labels: + - "github_actions" + - "dependencies" + - "pr/no-changelog" schedule: - interval: daily \ No newline at end of file + interval: daily From c125fb35a873b9476495156baabfacfd6422928f Mon Sep 17 00:00:00 2001 From: Noel Quiles <3746694+EnMod@users.noreply.github.com> Date: Tue, 7 Dec 2021 15:32:06 -0500 Subject: [PATCH 58/60] website: Upgrade to latest (#11760) --- website/package-lock.json | 88 +++++++++++++-------------------------- website/package.json | 2 +- 2 files changed, 30 insertions(+), 60 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index 7b67f8d16..b1387cef3 100644 --- a/website/package-lock.json +++ 
b/website/package-lock.json @@ -25,7 +25,7 @@ "@hashicorp/react-docs-page": "^14.4.2", "@hashicorp/react-enterprise-alert": "^6.0.1", "@hashicorp/react-featured-slider": "^5.0.1", - "@hashicorp/react-hashi-stack-menu": "^2.0.7", + "@hashicorp/react-hashi-stack-menu": "^2.1.2", "@hashicorp/react-head": "^3.1.2", "@hashicorp/react-hero": "^8.0.2", "@hashicorp/react-image": "^4.0.3", @@ -1280,12 +1280,13 @@ } }, "node_modules/@hashicorp/react-hashi-stack-menu": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-2.0.7.tgz", - "integrity": "sha512-3b9VxAzSPbb5YeiKbMiygrointTb56VsMBp27mXPXS2dqrZ4pj1o5AJCYwescv7g7KbwwL+YC9hlLlhRxjErUw==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-2.1.2.tgz", + "integrity": "sha512-3K6Y2FNSLq6TsMceX3a1pdmxaVx8lRvxCgjxcKL1P6DpmOk0AU5/eL0CvKP8GzXSYstL36xXxIhWFfiaAaF/yQ==", "dependencies": { - "@hashicorp/react-inline-svg": "^1.0.2", - "slugify": "1.3.4" + "@hashicorp/react-inline-svg": "^6.0.1", + "@hashicorp/react-link-wrap": "^3.0.3", + "slugify": "1.6.0" }, "peerDependencies": { "@hashicorp/mktg-global-styles": ">=3.x", @@ -1293,20 +1294,12 @@ "react": ">=16.x" } }, - "node_modules/@hashicorp/react-hashi-stack-menu/node_modules/@hashicorp/react-inline-svg": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-inline-svg/-/react-inline-svg-1.0.2.tgz", - "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==", - "peerDependencies": { - "react": "^16.9.0" - } - }, "node_modules/@hashicorp/react-hashi-stack-menu/node_modules/slugify": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.3.4.tgz", - "integrity": "sha512-KP0ZYk5hJNBS8/eIjGkFDCzGQIoZ1mnfQRYS5WM3273z+fxGWXeN0fkwf2ebEweydv9tioZIHGZKoF21U07/nw==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.0.tgz", + "integrity": "sha512-FkMq+MQc5hzYgM86nLuHI98Acwi3p4wX+a5BO9Hhw4JdK4L7WueIiZ4tXEobImPqBz2sVcV0+Mu3GRB30IGang==", "engines": { - "node": ">=4.0.0" + "node": ">=8.0.0" } }, "node_modules/@hashicorp/react-head": { @@ -2012,13 +2005,9 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-11.1.2.tgz", "integrity": "sha512-hZuwOlGOwBZADA8EyDYyjx3+4JGIGjSHDHWrmpI7g5rFmQNltjlbaefAbiU5Kk7j3BUSDwt30quJRFv3nyJQ0w==", - "cpu": [ - "arm64" - ], + "cpu": ["arm64"], "optional": true, - "os": [ - "darwin" - ], + "os": ["darwin"], "engines": { "node": ">= 10" } @@ -2027,13 +2016,9 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-11.1.2.tgz", "integrity": "sha512-PGOp0E1GisU+EJJlsmJVGE+aPYD0Uh7zqgsrpD3F/Y3766Ptfbe1lEPPWnRDl+OzSSrSrX1lkyM/Jlmh5OwNvA==", - "cpu": [ - "x64" - ], + "cpu": ["x64"], "optional": true, - "os": [ - "darwin" - ], + "os": ["darwin"], "engines": { "node": ">= 10" } @@ -2042,13 +2027,9 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-11.1.2.tgz", "integrity": "sha512-YcDHTJjn/8RqvyJVB6pvEKXihDcdrOwga3GfMv/QtVeLphTouY4BIcEUfrG5+26Nf37MP1ywN3RRl1TxpurAsQ==", - "cpu": [ - "x64" - ], + "cpu": ["x64"], "optional": true, - "os": [ - "linux" - ], + "os": ["linux"], "engines": { "node": ">= 10" } @@ -2057,13 +2038,9 @@ "version": "11.1.2", "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-11.1.2.tgz", 
"integrity": "sha512-e/pIKVdB+tGQYa1cW3sAeHm8gzEri/HYLZHT4WZojrUxgWXqx8pk7S7Xs47uBcFTqBDRvK3EcQpPLf3XdVsDdg==", - "cpu": [ - "x64" - ], + "cpu": ["x64"], "optional": true, - "os": [ - "win32" - ], + "os": ["win32"], "engines": { "node": ">= 10" } @@ -8083,9 +8060,7 @@ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "hasInstallScript": true, "optional": true, - "os": [ - "darwin" - ], + "os": ["darwin"], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } @@ -19263,24 +19238,19 @@ } }, "@hashicorp/react-hashi-stack-menu": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-2.0.7.tgz", - "integrity": "sha512-3b9VxAzSPbb5YeiKbMiygrointTb56VsMBp27mXPXS2dqrZ4pj1o5AJCYwescv7g7KbwwL+YC9hlLlhRxjErUw==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-2.1.2.tgz", + "integrity": "sha512-3K6Y2FNSLq6TsMceX3a1pdmxaVx8lRvxCgjxcKL1P6DpmOk0AU5/eL0CvKP8GzXSYstL36xXxIhWFfiaAaF/yQ==", "requires": { - "@hashicorp/react-inline-svg": "^1.0.2", - "slugify": "1.3.4" + "@hashicorp/react-inline-svg": "^6.0.1", + "@hashicorp/react-link-wrap": "^3.0.3", + "slugify": "1.6.0" }, "dependencies": { - "@hashicorp/react-inline-svg": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@hashicorp/react-inline-svg/-/react-inline-svg-1.0.2.tgz", - "integrity": "sha512-AAFnBslSTgnEr++dTbMn3sybAqvn7myIj88ijGigF6u11eSRiV64zqEcyYLQKWTV6dF4AvYoxiYC6GSOgiM0Yw==", - "requires": {} - }, "slugify": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.3.4.tgz", - "integrity": "sha512-KP0ZYk5hJNBS8/eIjGkFDCzGQIoZ1mnfQRYS5WM3273z+fxGWXeN0fkwf2ebEweydv9tioZIHGZKoF21U07/nw==" + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.6.0.tgz", + "integrity": "sha512-FkMq+MQc5hzYgM86nLuHI98Acwi3p4wX+a5BO9Hhw4JdK4L7WueIiZ4tXEobImPqBz2sVcV0+Mu3GRB30IGang==" } } }, diff --git a/website/package.json b/website/package.json index 9c247e886..dcee7d6a4 100644 --- a/website/package.json +++ b/website/package.json @@ -21,7 +21,7 @@ "@hashicorp/react-docs-page": "^14.4.2", "@hashicorp/react-enterprise-alert": "^6.0.1", "@hashicorp/react-featured-slider": "^5.0.1", - "@hashicorp/react-hashi-stack-menu": "^2.0.7", + "@hashicorp/react-hashi-stack-menu": "^2.1.2", "@hashicorp/react-head": "^3.1.2", "@hashicorp/react-hero": "^8.0.2", "@hashicorp/react-image": "^4.0.3", From d74109d124a2ba88b1bd49624477408eaa5e4f3a Mon Sep 17 00:00:00 2001 From: Giovanni Torres Date: Mon, 6 Dec 2021 20:27:20 -0500 Subject: [PATCH 59/60] docs: add missing verb This change adds a missing verb at the end of the sentence. --- website/content/docs/security/security-models/nia.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/security/security-models/nia.mdx b/website/content/docs/security/security-models/nia.mdx index 980a0e183..0a26bc4f6 100644 --- a/website/content/docs/security/security-models/nia.mdx +++ b/website/content/docs/security/security-models/nia.mdx @@ -71,7 +71,7 @@ security concerns accordingly. #### Recommendations - **Use Dedicated Host** - The NIA daemon will potentially have access to critical secrets for your environment’s - network infrastructure. Using a hardened, dedicated host, for supporting these sensitive operations is highly. + network infrastructure. 
Using a hardened, dedicated host, for supporting these sensitive operations is highly recommended. - **Run without Root** - The NIA daemon does not require root or other administrative privileges to operate. From 54ac9b90db389da5b843fb8c486640c82c978c8e Mon Sep 17 00:00:00 2001 From: Freddy Date: Wed, 8 Dec 2021 09:34:31 -0700 Subject: [PATCH 60/60] Add v1.11.0-rc changelog entry (#11776) --- CHANGELOG.md | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5032e6b73..b8db474ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,61 @@ +## 1.11.0-rc (December 08, 2021) + +BREAKING CHANGES: + +* cli: `consul acl set-agent-token master` has been replaced with `consul acl set-agent-token recovery` [[GH-11669](https://github.com/hashicorp/consul/issues/11669)] + +FEATURES: + +* partitions: **(Enterprise only)** Ensure partitions and serf-based WAN federation are mutually exclusive. +* ui: Add documentation link to Partition empty state [[GH-11668](https://github.com/hashicorp/consul/issues/11668)] +* ui: Adds basic support for showing Services exported from another partition. [[GH-11702](https://github.com/hashicorp/consul/issues/11702)] +* ui: Adds support for partitions to Service and Node Identity template visuals. [[GH-11696](https://github.com/hashicorp/consul/issues/11696)] +* ui: Adds support for partitions to the Routing visualization. [[GH-11679](https://github.com/hashicorp/consul/issues/11679)] +* ui: Don't offer a 'Valid Datacenters' option when editing policies for non-default partitions [[GH-11656](https://github.com/hashicorp/consul/issues/11656)] +* ui: Include `Service.Partition` into available variables for `dashboard_url_templates` [[GH-11654](https://github.com/hashicorp/consul/issues/11654)] +* ui: Upgrade Lock Sessions to use partitions [[GH-11666](https://github.com/hashicorp/consul/issues/11666)] + +IMPROVEMENTS: + +* agent: **(Enterprise only)** purge service/check registration files for incorrect partitions on reload [[GH-11607](https://github.com/hashicorp/consul/issues/11607)] +* agent: add variation of force-leave that exclusively works on the WAN [[GH-11722](https://github.com/hashicorp/consul/issues/11722)] +* api: **(Enterprise Only)** rename partition-exports config entry to exported-services. [[GH-11739](https://github.com/hashicorp/consul/issues/11739)] +* auto-config: ensure the feature works properly with partitions [[GH-11699](https://github.com/hashicorp/consul/issues/11699)] +* connect: **(Enterprise only)** add support for cross-partition transparent proxying. [[GH-11738](https://github.com/hashicorp/consul/issues/11738)] +* connect: **(Enterprise only)** add support for targeting partitions in discovery chain routes, splits, and redirects. [[GH-11757](https://github.com/hashicorp/consul/issues/11757)] +* connect: Consul will now generate a unique virtual IP for each connect-enabled service (this will also differ across namespace/partition in Enterprise). [[GH-11724](https://github.com/hashicorp/consul/issues/11724)] +* connect: Support Vault auth methods for the Connect CA Vault provider. Currently, we support any non-deprecated auth methods +the latest version of Vault supports (v1.8.5), which include AppRole, AliCloud, AWS, Azure, Cloud Foundry, GitHub, Google Cloud, +JWT/OIDC, Kerberos, Kubernetes, LDAP, Oracle Cloud Infrastructure, Okta, Radius, TLS Certificates, and Username & Password. 
[[GH-11573](https://github.com/hashicorp/consul/issues/11573)] +* dns: Added a `virtual` endpoint for querying the assigned virtual IP for a service. [[GH-11725](https://github.com/hashicorp/consul/issues/11725)] +* partitions: **(Enterprise only)** rename APIs, commands, and public types to use "partition" rather than "admin partition". [[GH-11737](https://github.com/hashicorp/consul/issues/11737)] +* raft: Added a configuration to disable boltdb freelist syncing [[GH-11720](https://github.com/hashicorp/consul/issues/11720)] +* raft: Emit boltdb related performance metrics [[GH-11720](https://github.com/hashicorp/consul/issues/11720)] +* raft: Use bbolt instead of the legacy boltdb implementation [[GH-11720](https://github.com/hashicorp/consul/issues/11720)] +* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids +* server: block enterprise-specific partition-exports config entry from being used in OSS Consul. [[GH-11680](https://github.com/hashicorp/consul/issues/11680)] +* types: add TLSVersion and TLSCipherSuite [[GH-11645](https://github.com/hashicorp/consul/issues/11645)] +* ui: Add partition support for SSO [[GH-11604](https://github.com/hashicorp/consul/issues/11604)] +* ui: Update global notification styling [[GH-11577](https://github.com/hashicorp/consul/issues/11577)] + +DEPRECATIONS: + +* api: `/v1/agent/token/agent_master` is deprecated and will be removed in a future major release - use `/v1/agent/token/agent_recovery` instead [[GH-11669](https://github.com/hashicorp/consul/issues/11669)] +* config: `acl.tokens.master` has been renamed to `acl.tokens.initial_management`, and `acl.tokens.agent_master` has been renamed to `acl.tokens.agent_recovery` - the old field names are now deprecated and will be removed in a future major release [[GH-11665](https://github.com/hashicorp/consul/issues/11665)] + +BUG FIXES: + +* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)] +* areas: **(Enterprise only)** make the gRPC server tracker network area aware [[GH-11748](https://github.com/hashicorp/consul/issues/11748)] +* ca: fixes a bug that caused non blocking leaf cert queries to return the same cached response regardless of ca rotation or leaf cert expiry [[GH-11693](https://github.com/hashicorp/consul/issues/11693)] +* ca: fixes a bug that caused the SigningKeyID to be wrong in the primary DC, when the Vault provider is used, after a CA config creates a new root. [[GH-11672](https://github.com/hashicorp/consul/issues/11672)] +* ca: fixes a bug that caused the intermediate cert used to sign leaf certs to be missing from the /connect/ca/roots API response when the Vault provider was used. [[GH-11671](https://github.com/hashicorp/consul/issues/11671)] +* ui: Fix inline-code brand styling [[GH-11578](https://github.com/hashicorp/consul/issues/11578)] +* ui: Fix visual issue with slight table header overflow [[GH-11670](https://github.com/hashicorp/consul/issues/11670)] +* ui: Fixes an issue where under some circumstances after logging we present the +data loaded previous to you logging in. [[GH-11681](https://github.com/hashicorp/consul/issues/11681)] +* ui: Include `Service.Namespace` into available variables for `dashboard_url_templates` [[GH-11640](https://github.com/hashicorp/consul/issues/11640)] + ## 1.11.0-beta3 (November 17, 2021) SECURITY: